| text (string, lengths 0 – 1.05M) | meta (dict) |
| --- | --- |
from __future__ import absolute_import, division, print_function
import logging
import sys
from mesos.interface import Executor
from .messages import decode, encode
class ExecutorProxy(Executor):
    """Proxy between the Mesos executor driver and a user-provided executor.
    Decodes the protobuf arguments, wraps the driver in ExecutorDriverProxy and
    forwards each callback to the corresponding on_* method of the wrapped
    executor.
    """
def __init__(self, executor):
self.executor = executor
def registered(self, driver, executorInfo, frameworkInfo, slaveInfo):
logging.info('Registered with slave', extra=dict())
return self.executor.on_registered(ExecutorDriverProxy(driver),
decode(executorInfo),
decode(frameworkInfo),
decode(slaveInfo))
def reregistered(self, driver, slaveInfo):
logging.info('Re-registered with slave', extra=dict())
return self.executor.on_reregistered(ExecutorDriverProxy(driver),
decode(slaveInfo))
def disconnected(self, driver):
logging.info('Disconnected from slave')
return self.executor.on_disconnected(ExecutorDriverProxy(driver))
def launchTask(self, driver, taskInfo):
logging.info('Launch task received')
return self.executor.on_launch(ExecutorDriverProxy(driver),
decode(taskInfo))
def killTask(self, driver, taskId):
        logging.info('Kill task received')
return self.executor.on_kill(ExecutorDriverProxy(driver),
decode(taskId))
def frameworkMessage(self, driver, message):
        logging.info('Received framework message', extra=dict())
return self.executor.on_message(ExecutorDriverProxy(driver),
message)
def shutdown(self, driver):
logging.info('Executor shutdown received')
return self.executor.on_shutdown(ExecutorDriverProxy(driver))
def error(self, driver, message):
print("Error from Mesos: %s" % message, file=sys.stderr)
return self.executor.on_error(ExecutorDriverProxy(driver),
message)
class ExecutorDriverProxy(object):
def __init__(self, driver):
self.driver = driver
def start(self):
"""Starts the executor driver.
This needs to be called before any other driver calls are made.
"""
logging.info('Driver started')
return self.driver.start()
def stop(self):
"""Stops the executor driver."""
logging.info('Driver stopped')
return self.driver.stop()
def abort(self):
"""Aborts the driver so that no more callbacks can be made to the
executor.
The semantics of abort and stop have deliberately been separated so that
code can detect an aborted driver (i.e., via the return status of
ExecutorDriver.join), and instantiate and start another driver if
desired (from within the same process, although this functionality is
currently not supported for executors).
"""
logging.info('Driver aborted')
return self.driver.abort()
def join(self):
"""Waits for the driver to be stopped or aborted, possibly blocking the
current thread indefinitely.
The return status of this function can be used to determine if the
driver was aborted (see mesos.proto for a description of Status).
"""
logging.info('Joined to driver')
return self.driver.join()
def run(self):
"""Starts and immediately joins (i.e., blocks on) the driver."""
logging.info('Driver run')
return self.driver.run()
def update(self, status):
"""Sends a status update to the framework scheduler.
Retrying as necessary until an acknowledgement has been received or the
executor is terminated (in which case, a TASK_LOST status update will be
sent).
See Scheduler.statusUpdate for more information about status update
acknowledgements.
"""
logging.info('Executor sends status update {} for task {}'.format(
status.state, status.task_id))
return self.driver.sendStatusUpdate(encode(status))
def message(self, data):
"""Sends a message to the framework scheduler.
These messages are best effort; do not expect a framework message to be
retransmitted in any reliable fashion.
"""
logging.info('Driver sends framework message {}'.format(data))
return self.driver.sendFrameworkMessage(data)
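# Example (sketch): wiring a user executor through ExecutorProxy with the
# legacy mesos.native bindings. _ExampleExecutor, its callback bodies and the
# driver import below are illustrative assumptions, not part of this module.
class _ExampleExecutor(object):
    def on_registered(self, driver, executor_info, framework_info, slave_info):
        logging.info('Example executor registered')
    def on_launch(self, driver, task):
        logging.info('Example executor received task %s', task)
if __name__ == '__main__':
    from mesos.native import MesosExecutorDriver
    driver = MesosExecutorDriver(ExecutorProxy(_ExampleExecutor()))
    driver.run()  # blocks until the driver is stopped or aborted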
| {
"repo_name": "lensacom/satyr",
"path": "mentor/proxies/executor.py",
"copies": "1",
"size": "4733",
"license": "apache-2.0",
"hash": 23575756996156816,
"line_mean": 35.9765625,
"line_max": 80,
"alpha_frac": 0.628142827,
"autogenerated": false,
"ratio": 4.854358974358974,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00019290123456790122,
"num_lines": 128
} |
from __future__ import absolute_import, division, print_function
import logging
import sys
from mesos.interface import Scheduler
from .messages import Filters, decode, encode
class SchedulerProxy(Scheduler):
def __init__(self, scheduler):
self.scheduler = scheduler
def registered(self, driver, frameworkId, masterInfo):
logging.info('Registered with master')
return self.scheduler.on_registered(SchedulerDriverProxy(driver),
decode(frameworkId),
decode(masterInfo))
def reregistered(self, driver, masterInfo):
logging.info('Re-registered with master')
return self.scheduler.on_reregistered(SchedulerDriverProxy(driver),
decode(masterInfo))
def disconnected(self, driver):
logging.info('Disconnected from master')
return self.scheduler.on_disconnected(SchedulerDriverProxy(driver))
def resourceOffers(self, driver, offers):
logging.info('Got {} resource offers'.format(len(offers)))
return self.scheduler.on_offers(SchedulerDriverProxy(driver),
map(decode, offers))
def offerRescinded(self, driver, offerId):
logging.info('Offer {} rescinded'.format(offerId))
return self.scheduler.on_rescinded(SchedulerDriverProxy(driver),
decode(offerId))
def statusUpdate(self, driver, status):
logging.debug('Status update received with state {} for task {}'.format(
            status.state, status.task_id))
return self.scheduler.on_update(SchedulerDriverProxy(driver),
decode(status))
def frameworkMessage(self, driver, executorId, slaveId, message):
logging.debug('Framework message received')
return self.scheduler.on_message(SchedulerDriverProxy(driver),
decode(executorId),
decode(slaveId),
message)
def slaveLost(self, driver, slaveId):
logging.debug('Slave has been lost, tasks should be rescheduled')
return self.scheduler.on_slave_lost(SchedulerDriverProxy(driver),
decode(slaveId))
def executorLost(self, driver, executorId, slaveId, state):
executor_id = decode(executorId)
slave_id = decode(slaveId)
logging.debug('Executor {} has been lost on {} with status {}'.format(
executor_id, slave_id, state))
return self.scheduler.on_executor_lost(SchedulerDriverProxy(driver),
executor_id, slave_id, state)
def error(self, driver, message):
print("Error from Mesos: %s " % message, file=sys.stderr)
return self.scheduler.on_error(SchedulerDriverProxy(driver), message)
class SchedulerDriverProxy(object):
"""Proxy Interface for Mesos scheduler drivers."""
def __init__(self, driver):
self.driver = driver
def start(self):
"""Starts the scheduler driver.
This needs to be called before any other driver calls are made.
"""
logging.info('Starts Scheduler Driver')
return self.driver.start()
def stop(self, failover=False):
"""Stops the scheduler driver.
If the 'failover' flag is set to False then it is expected that this
framework will never reconnect to Mesos and all of its executors and
tasks can be terminated. Otherwise, all executors and tasks will
remain running (for some framework specific failover timeout) allowing
the scheduler to reconnect (possibly in the same process, or from a
different process, for example, on a different machine.)
"""
logging.info('Stops Scheduler Driver')
return self.driver.stop(failover)
def abort(self):
"""Aborts the driver so that no more callbacks can be made to the
scheduler.
The semantics of abort and stop have deliberately been separated so that
code can detect an aborted driver (i.e., via the return status of
SchedulerDriver.join), and instantiate and start another driver if
desired (from within the same process.)
"""
logging.info('Aborts Scheduler Driver')
return self.driver.abort()
def join(self):
"""Waits for the driver to be stopped or aborted, possibly blocking the
current thread indefinitely.
The return status of this function can be used to determine if the
driver was aborted (see mesos.proto for a description of Status).
"""
logging.info('Joins Scheduler Driver')
return self.driver.join()
def request(self, requests):
"""Requests resources from Mesos.
(see mesos.proto for a description of Request and how, for example, to
request resources from specific slaves.)
Any resources available are offered to the framework via
Scheduler.resourceOffers callback, asynchronously.
"""
logging.info('Request resources from Mesos')
return self.driver.requestResources(map(encode, requests))
def launch(self, offer_id, tasks, filters=Filters()):
"""Launches the given set of tasks.
Any resources remaining (i.e., not used by the tasks or their executors)
will be considered declined.
The specified filters are applied on all unused resources (see
mesos.proto for a description of Filters). Available resources are
aggregated when multiple offers are provided. Note that all offers must
belong to the same slave. Invoking this function with an empty
collection of tasks declines the offers in entirety (see
Scheduler.decline).
Note that passing a single offer is also supported.
"""
logging.info('Launches tasks {}'.format(tasks))
return self.driver.launchTasks(encode(offer_id),
map(encode, tasks),
encode(filters))
def kill(self, task_id):
"""Kills the specified task.
Note that attempting to kill a task is currently not reliable.
If, for example, a scheduler fails over while it was attempting to kill
a task it will need to retry in the future.
Likewise, if unregistered / disconnected, the request will be dropped
(these semantics may be changed in the future).
"""
logging.info('Kills task {}'.format(task_id))
return self.driver.killTask(encode(task_id))
def reconcile(self, statuses):
"""Allows the framework to query the status for non-terminal tasks.
This causes the master to send back the latest task status for each task
in 'statuses', if possible. Tasks that are no longer known will result
in a TASK_LOST update. If statuses is empty, then the master will send
the latest status for each task currently known.
"""
logging.info('Reconciles task statuses {}'.format(statuses))
return self.driver.reconcileTasks(map(encode, statuses))
def decline(self, offer_id, filters=Filters()):
"""Declines an offer in its entirety and applies the specified
filters on the resources (see mesos.proto for a description of
Filters).
Note that this can be done at any time, it is not necessary to do this
within the Scheduler::resourceOffers callback.
"""
logging.info('Declines offer {}'.format(offer_id))
return self.driver.declineOffer(encode(offer_id),
encode(filters)) # TODO filters
def accept(self, offer_ids, operations, filters=Filters()):
"""Accepts the given offers and performs a sequence of operations
on those accepted offers.
See Offer.Operation in mesos.proto for the set of available operations.
Available resources are aggregated when multiple offers are provided.
Note that all offers must belong to the same slave. Any unused resources
will be considered declined. The specified filters are applied on all
unused resources (see mesos.proto for a description of Filters).
"""
logging.info('Accepts offers {}'.format(offer_ids))
return self.driver.acceptOffers(map(encode, offer_ids),
map(encode, operations),
encode(filters))
def revive(self):
"""Removes all filters previously set by the framework (via
launchTasks()).
This enables the framework to receive offers from those filtered slaves.
"""
logging.info(
'Revives; removes all filters previously set by framework')
return self.driver.reviveOffers()
def suppress(self):
"""Inform Mesos master to stop sending offers to the framework.
The scheduler should call reviveOffers() to resume getting offers.
"""
logging.info('Suppress offers for framework')
return self.driver.suppressOffers()
def acknowledge(self, status):
"""Acknowledges the status update.
This should only be called once the status update is processed durably
by the scheduler.
        Note that explicit acknowledgements must be requested via the constructor
argument, otherwise a call to this method will cause the driver to
crash.
"""
logging.info('Acknowledges status update {}'.format(status))
return self.driver.acknowledgeStatusUpdate(encode(status))
def message(self, executor_id, slave_id, message):
"""Sends a message from the framework to one of its executors.
These messages are best effort; do not expect a framework message to be
retransmitted in any reliable fashion.
"""
logging.info('Sends message `{}` to executor `{}` on slave `{}`'.format(
message, executor_id, slave_id))
return self.driver.sendFrameworkMessage(encode(executor_id),
encode(slave_id),
message)
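# Example (sketch): a minimal user scheduler wrapped in SchedulerProxy and run
# with the legacy mesos.native driver. The framework info, master address and
# the decline-everything offer handling below are illustrative assumptions.
class _ExampleScheduler(object):
    def on_offers(self, driver, offers):
        for offer in offers:
            driver.decline(offer.id)  # a real scheduler would launch tasks here
    def on_update(self, driver, status):
        logging.info('Example scheduler saw update %s', status.state)
if __name__ == '__main__':
    from mesos.interface import mesos_pb2
    from mesos.native import MesosSchedulerDriver
    framework = mesos_pb2.FrameworkInfo(user='', name='example-framework')
    driver = MesosSchedulerDriver(SchedulerProxy(_ExampleScheduler()),
                                  framework, 'zk://localhost:2181/mesos')
    driver.run()  # blocks until the driver is stopped or aborted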
| {
"repo_name": "lensacom/satyr",
"path": "mentor/proxies/scheduler.py",
"copies": "1",
"size": "10465",
"license": "apache-2.0",
"hash": 9211640960720103000,
"line_mean": 41.8893442623,
"line_max": 80,
"alpha_frac": 0.6279980889,
"autogenerated": false,
"ratio": 4.910839981229469,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.603883807012947,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import logging
from functools import wraps
import numpy as np
from matplotlib.backends.backend_agg import FigureCanvasAgg
from .misc import DeferredMethod
__all__ = ['all_artists', 'new_artists', 'remove_artists', 'get_extent',
'view_cascade', 'fast_limits', 'defer_draw',
'color2rgb', 'point_contour']
def all_artists(fig):
"""
Build a set of all Matplotlib artists in a Figure
"""
return set(item
for axes in fig.axes
for container in [axes.collections, axes.patches, axes.lines,
axes.texts, axes.artists, axes.images]
for item in container)
def new_artists(fig, old_artists):
"""
Find the newly-added artists in a figure
:param fig: Matplotlib figure
    :param old_artists: Return value from :func:`all_artists`
:returns: All artists added since all_artists was called
"""
return all_artists(fig) - old_artists
def remove_artists(artists):
"""
Remove a collection of matplotlib artists from a scene
:param artists: Container of artists
"""
for a in artists:
try:
a.remove()
except ValueError: # already removed
pass
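# Example (sketch): the snapshot/diff pattern these helpers support -- record
# the artists before a drawing step, then strip only what that step added. The
# figure and plotting calls are illustrative.
def _example_artist_cleanup(fig):
    before = all_artists(fig)
    ax = fig.add_subplot(1, 1, 1)
    ax.plot([0, 1], [0, 1])
    ax.text(0.5, 0.5, 'temporary annotation')
    added = new_artists(fig, before)
    remove_artists(added)  # the figure is back to its previous set of artists
    return added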
def get_extent(view, transpose=False):
sy, sx = [s for s in view if isinstance(s, slice)]
if transpose:
return (sy.start, sy.stop, sx.start, sx.stop)
return (sx.start, sx.stop, sy.start, sy.stop)
def view_cascade(data, view):
""" Return a set of views progressively zoomed out of input at roughly
constant pixel count
:param data: Data object to view
:param view: Original view into data
:rtype: tuple of views
"""
shp = data.shape
v2 = list(view)
logging.debug("image shape: %s, view: %s", shp, view)
# choose stride length that roughly samples entire image
# at roughly the same pixel count
step = max(shp[i - 1] * v.step // max(v.stop - v.start, 1)
for i, v in enumerate(view) if isinstance(v, slice))
step = max(step, 1)
for i, v in enumerate(v2):
if not(isinstance(v, slice)):
continue
v2[i] = slice(0, shp[i - 1], step)
return tuple(v2), view
def _scoreatpercentile(values, percentile, limit=None):
# Avoid using the scipy version since it is available in Numpy
if limit is not None:
values = values[(values >= limit[0]) & (values <= limit[1])]
return np.percentile(values, percentile)
def fast_limits(data, plo, phi):
"""Quickly estimate percentiles in an array,
using a downsampled version
:param data: array-like
:param plo: Lo percentile
:param phi: High percentile
:rtype: Tuple of floats. Approximate values of each percentile in
data[component]
"""
shp = data.shape
    # use an integer stride; a float step is not a valid slice step
    view = tuple([slice(None, None, max(s // 50, 1)) for s in shp])
values = np.asarray(data)[view]
    if not np.isfinite(values).any():
return (0.0, 1.0)
limits = (-np.inf, np.inf)
lo = _scoreatpercentile(values.flat, plo, limit=limits)
hi = _scoreatpercentile(values.flat, phi, limit=limits)
return lo, hi
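# Example (sketch): clipping an image to approximate 1st/99th percentile
# limits; the random array stands in for real data.
def _example_fast_limits():
    data = np.random.normal(size=(1024, 1024))
    lo, hi = fast_limits(data, 1, 99)
    return np.clip(data, lo, hi)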
def defer_draw(func):
"""
Decorator that globally defers all Agg canvas draws until
function exit.
If a Canvas instance's draw method is invoked multiple times,
it will only be called once after the wrapped function returns.
"""
@wraps(func)
def wrapper(*args, **kwargs):
# don't recursively defer draws
if isinstance(FigureCanvasAgg.draw, DeferredMethod):
return func(*args, **kwargs)
try:
FigureCanvasAgg.draw = DeferredMethod(FigureCanvasAgg.draw)
result = func(*args, **kwargs)
finally:
FigureCanvasAgg.draw.execute_deferred_calls()
FigureCanvasAgg.draw = FigureCanvasAgg.draw.original_method
return result
wrapper._is_deferred = True
return wrapper
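# Example (sketch): batching canvas draws. Each explicit canvas.draw() below is
# deferred while the decorated function runs and collapses into a single draw
# on exit. The axes/data arguments are illustrative.
@defer_draw
def _example_update_plots(axes_list, datasets):
    for ax, (x, y) in zip(axes_list, datasets):
        ax.scatter(x, y)
        ax.figure.canvas.draw()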
def color2rgb(color):
from matplotlib.colors import ColorConverter
result = ColorConverter().to_rgb(color)
return result
def point_contour(x, y, data):
"""Calculate the contour that passes through (x,y) in data
:param x: x location
:param y: y location
:param data: 2D image
:type data: :class:`numpy.ndarray`
Returns:
    * A (nrow, 2) numpy array. The two columns give the x and
y locations of the contour vertices
"""
try:
from scipy import ndimage
except ImportError:
raise ImportError("Image processing in Glue requires SciPy")
inten = data[y, x]
labeled, nr_objects = ndimage.label(data >= inten)
z = data * (labeled == labeled[y, x])
y, x = np.mgrid[0:data.shape[0], 0:data.shape[1]]
from matplotlib import _cntr
cnt = _cntr.Cntr(x, y, z)
xy = cnt.trace(inten)
if not xy:
return None
xy = xy[0]
return xy
| {
"repo_name": "JudoWill/glue",
"path": "glue/utils/matplotlib.py",
"copies": "1",
"size": "4963",
"license": "bsd-3-clause",
"hash": -4540102565890013000,
"line_mean": 26.8820224719,
"line_max": 76,
"alpha_frac": 0.6312714084,
"autogenerated": false,
"ratio": 3.7176029962546817,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9848874404654682,
"avg_score": 0,
"num_lines": 178
} |
from __future__ import absolute_import, division, print_function
import logging
from mentos.connection import Connection
from mentos.exceptions import (BadRequest, BadSubscription,
ConnectionLost, ConnectionRefusedError,
DetectorClosed, FailedRetry, MasterRedirect,
NoLeadingMaster, OutBoundError)
from mentos.retry import RetryPolicy
from mentos.states import SessionStateMachine, States
from mentos.utils import MasterInfo, log_errors
from tornado import gen
from tornado.httpclient import HTTPError
from tornado.ioloop import IOLoop, PeriodicCallback
log = logging.getLogger(__name__)
class Event(object):
SUBSCRIBED = 'SUBSCRIBED'
HEARTBEAT = 'HEARTBEAT'
OFFERS = 'OFFERS'
RESCIND = 'RESCIND'
UPDATE = 'UPDATE'
MESSAGE = 'MESSAGE'
FAILURE = 'FAILURE'
ERROR = 'ERROR'
ACKNOWLEDGED = 'ACKNOWLEDGED'
SHUTDOWN = 'SHUTDOWN'
KILL = 'KILL'
LAUNCH_GROUP = 'LAUNCH_GROUP'
LAUNCH = 'LAUNCH'
RESCIND_INVERSE_OFFER = 'RESCIND_INVERSE_OFFER'
CLOSE = 'CLOSE'
OUTBOUND_SUCCESS = 'OUTBOUND_SUCCESS'
OUTBOUND_ERROR = 'OUTBOUND_ERROR'
class Message(object):
SUBSCRIBE = 'SUBSCRIBE'
UPDATE = 'UPDATE'
MESSAGE = 'MESSAGE'
ACCEPT = 'ACCEPT'
DECLINE = 'DECLINE'
REVIVE = 'REVIVE'
KILL = 'KILL'
SHUTDOWN = 'SHUTDOWN'
ACKNOWLEDGE = 'ACKNOWLEDGE'
RECONCILE = 'RECONCILE'
REQUEST = 'REQUEST'
class Subscription(object):
def __init__(self, framework, uri, api_path, event_handlers=None, principal=None, secret=None,
timeout=75, retry_policy=RetryPolicy.forever(), loop=None):
self.loop = loop or IOLoop.current()
self.connection = None
self.state = SessionStateMachine()
self.retry_policy = retry_policy
self.api_path = api_path
self.event_handlers = event_handlers or {}
self.closing = False
self.master_uri = uri
self.master_info = MasterInfo(self.master_uri)
self.mesos_stream_id = None
        # TODO: I don't like doing this
self.framework = framework
self.tasks = None
self.updates = None
self.timeout = timeout
self.principal = principal
self.secret = secret
@gen.coroutine
def ensure_safe(self, safe_states=[States.SUBSCRIBED, States.SUBSCRIBING]):
if self.state in safe_states:
return
yield self.state.wait_for(*safe_states)
@gen.coroutine
def ensure_subscribed(self):
while not getattr(self.connection, 'mesos_stream_id', None):
yield gen.sleep(0.1)
self.mesos_stream_id = self.connection.mesos_stream_id
if self.state.current_state == States.SUBSCRIBING:
self.state.transition_to(States.SUBSCRIBED)
@gen.coroutine
def start(self):
pc = PeriodicCallback(lambda: None, 1000, io_loop=self.loop)
self.loop.add_callback(pc.start)
self.loop.add_callback(self.subscription_loop)
yield self.ensure_safe()
@gen.coroutine
def subscription_loop(self):
with log_errors():
while not self.closing:
yield self.state.wait_for(States.CLOSED, States.SUSPENDED)
if self.closing: # pragma: no cover
break
yield self.detect_master()
self.loop.add_callback(self.ensure_subscribed)
yield self.subscribe()
@gen.coroutine
def detect_master(self):
conn = None
self.retry_policy = RetryPolicy.exponential_backoff(
maximum=self.timeout)
while not conn:
yield self.retry_policy.enforce()
try:
endpoint = yield self.master_info.get_endpoint()
except NoLeadingMaster: # pragma: no cover
self.connection = None
endpoint = None
except DetectorClosed:
self.connection = None
endpoint = None
if not endpoint: # pragma: no cover
yield self.retry_policy.enforce()
conn = yield self.make_connection(endpoint, self.api_path)
old_conn = self.connection
self.connection = conn
log.warn('Connected to %s' % endpoint)
if old_conn: # pragma: no cover
old_conn.close()
@gen.coroutine
def subscribe(self):
try:
self.state.transition_to(States.SUBSCRIBING)
request = {
'type': 'SUBSCRIBE',
'subscribe': {
'framework_info': self.framework
}
}
if 'id' in self.framework:
request['framework_id'] = self.framework['id']
if 'executor_id' in self.framework: # pragma: no cover
request['executor_id'] = self.framework['executor_id']
if self.tasks is not None: # pragma: no cover
request['subscribe']['unacknowledged_tasks'] = list(
self.tasks.values())
if self.updates is not None: # pragma: no cover
request['subscribe']['unacknowledged_updates'] = list(
self.updates.values())
yield self.connection.connect(request)
except ConnectionLost:
log.warn('Lost connection to the Master, will try to resubscribe')
self.connection.close()
self.connection = None
self.state.transition_to(States.SUSPENDED)
except BadSubscription:
log.warn('Bad Subscription request, aborting')
self.state.transition_to(States.CLOSED)
self.close()
@gen.coroutine
def make_connection(self, endpoint, api_path):
conn = Connection(endpoint, api_path, self._event_handler,
principal=self.principal, secret=self.secret)
try:
yield conn.ping()
except MasterRedirect as ex: # pragma: no cover
if ex.location == self.master_info.current_location:
log.warn('Leading Master not elected yet')
else: # pragma: no cover
log.warn('Master not leading')
self.master_info.redirected_uri(ex.location)
conn = None
except ConnectionRefusedError as ex: # pragma: no cover
conn = None
except Exception: # pragma: no cover
conn = None
raise gen.Return(conn)
@gen.coroutine
def send(self, request, retry_policy=RetryPolicy.n_times(3)):
response = None
# wait for safe state
yield self.state.wait_for(States.SUBSCRIBED)
errors = []
while not response:
try:
yield retry_policy.enforce(request)
yield self.ensure_safe()
if 'framework_id' not in request:
request['framework_id'] = self.framework['id']
response = yield self.connection.send(request)
except HTTPError as ex:
if ex.code == 400:
ex = BadRequest(ex.response.body)
log.debug('Bad request {request}, {ex}'.format(
request=request, ex=ex))
errors.append(ex)
except OutBoundError as ex: # pragma: no cover
# TODO question marc
# self.state.transition_to(States.SUSPENDED)
log.debug('Bad outbound message {} because {}'.format(
request, ex))
errors.append(ex)
except FailedRetry as ex:
log.error('Ran out of retries for {}, last error {}'.format(
request, errors[-1]))
self.outbound_error(OutBoundError(
self.connection.endpoint, request, errors))
break
else:
retry_policy.clear(request)
                self.outbound_success(request)
raise gen.Return(response)
def outbound_error(self, ex):
self._event_handler({'type': Event.OUTBOUND_ERROR,
'outbound_error': {'request': ex.request,
'endpoint': ex.endpoint,
'error': ex.errors}})
    def outbound_success(self, request):
self._event_handler({'type': Event.OUTBOUND_SUCCESS,
'outbound_success': {'request': request}})
def _event_handler(self, message):
try:
# Add special check to intercept framework_id
if message.get('type', None) == Event.SUBSCRIBED:
if 'framework_id' in message['subscribed']:
self.framework['id'] = message[
'subscribed']['framework_id']
# Add special check to ensure subscribed for executor
if (self.state.current_state == States.SUBSCRIBING and
'executor_info' in message['subscribed']): # pragma: no cover
self.state.transition_to(States.SUBSCRIBED)
if message['type'] in self.event_handlers:
_type = message['type']
log.debug('Got event of type %s from %s' %
(_type, self.master_info.info))
if _type == Event.HEARTBEAT:
self.event_handlers[_type](message)
elif _type == Event.SHUTDOWN: # pragma: no cover
self.event_handlers[_type]()
else:
self.event_handlers[_type](message[_type.lower()])
else: # pragma: no cover
log.warn('Unhandled event %s' % message)
except Exception as ex: # pragma: no cover
log.warn('Problem dispatching event %s' % message)
log.exception(ex)
def close(self):
log.debug('Closing Subscription')
self.closing = True
if self.master_info.detector:
log.debug('Closing Subscription Master Detector')
self.master_info.close()
if self.connection:
self.connection.close()
self.state.transition_to(States.CLOSED)
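# Example (sketch): driving a Subscription from scheduler-side code. The
# framework dict, master URI, API path and handlers below are illustrative;
# handlers for OFFERS, UPDATE, etc. would be registered the same way.
@gen.coroutine
def _example_subscribe():
    framework = {'user': 'example', 'name': 'example-framework'}
    handlers = {Event.HEARTBEAT: lambda event: log.debug('heartbeat %s', event),
                Event.OFFERS: lambda offers: log.debug('offers %s', offers)}
    subscription = Subscription(framework, 'zk://localhost:2181/mesos',
                                '/api/v1/scheduler', event_handlers=handlers)
    yield subscription.start()
    try:
        yield subscription.send({'type': Message.REVIVE})
    finally:
        subscription.close()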
| {
"repo_name": "daskos/mentos",
"path": "mentos/subscription.py",
"copies": "2",
"size": "10312",
"license": "apache-2.0",
"hash": 6655123149445748000,
"line_mean": 34.5586206897,
"line_max": 98,
"alpha_frac": 0.5629363848,
"autogenerated": false,
"ratio": 4.3056367432150315,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5868573128015032,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import logging
from pymongo import MongoClient
from pymongo.errors import BulkWriteError
from bson.objectid import ObjectId
from concurrent.futures import ThreadPoolExecutor
from tornado.concurrent import run_on_executor
logger = logging.getLogger('mongo')
class Mongo(object):
"""A ThreadPoolExecutor-based MongoDB client"""
def __init__(self, host=None):
kwargs = {}
if host:
parts = host.split(':')
if len(parts) == 2:
kwargs['port'] = int(parts[1])
kwargs['host'] = parts[0]
self.client = MongoClient(**kwargs).file_catalog
self.executor = ThreadPoolExecutor(max_workers=10)
@run_on_executor
def find_files(self, query={}, limit=None, start=0):
if 'mongo_id' in query:
query['_id'] = query['mongo_id']
del query['mongo_id']
if '_id' in query and not isinstance(query['_id'], dict):
query['_id'] = ObjectId(query['_id'])
projection = ('_id', 'uid')
result = self.client.files.find(query, projection)
ret = []
# `limit` and `skip` are ignored by __getitem__:
# http://api.mongodb.com/python/current/api/pymongo/cursor.html#pymongo.cursor.Cursor.__getitem__
#
# Therefore, implement it manually:
end = None
if limit is not None:
end = start + limit
for row in result[start:end]:
row['mongo_id'] = str(row['_id'])
del row['_id']
ret.append(row)
return ret
@run_on_executor
def create_file(self, metadata):
result = self.client.files.insert_one(metadata)
if (not result) or (not result.inserted_id):
logger.warn('did not insert file')
raise Exception('did not insert new file')
return str(result.inserted_id)
@run_on_executor
def get_file(self, filters):
if 'mongo_id' in filters:
filters['_id'] = filters['mongo_id']
del filters['mongo_id']
if '_id' in filters and not isinstance(filters['_id'], dict):
filters['_id'] = ObjectId(filters['_id'])
ret = self.client.files.find_one(filters)
if ret and '_id' in ret:
ret['mongo_id'] = str(ret['_id'])
del ret['_id']
return ret
@run_on_executor
def update_file(self, metadata):
# don't change the original dict
metadata_cpy = metadata.copy()
if 'mongo_id' in metadata_cpy:
metadata_cpy['_id'] = metadata_cpy['mongo_id']
del metadata_cpy['mongo_id']
metadata_id = metadata_cpy['_id']
if not isinstance(metadata_id, dict):
metadata_id = ObjectId(metadata_id)
# _id cannot be updated. Remove _id
del metadata_cpy['_id']
result = self.client.files.update_one({'_id': metadata_id},
{'$set': metadata_cpy})
if result.modified_count is None:
logger.warn('Cannot determine if document has been modified since `result.modified_count` has the value `None`. `result.matched_count` is %s' % result.matched_count)
elif result.modified_count != 1:
logger.warn('updated %s files with id %r',
result.modified_count, metadata_id)
raise Exception('did not update')
@run_on_executor
def replace_file(self, metadata):
if 'mongo_id' in metadata:
metadata['_id'] = metadata['mongo_id']
del metadata['mongo_id']
metadata_id = metadata['_id']
if not isinstance(metadata_id, dict):
metadata_id = ObjectId(metadata_id)
# _id cannot be updated. Make a copy and remove _id
metadata_cpy = metadata.copy()
del metadata_cpy['_id']
result = self.client.files.replace_one({'_id': metadata_id},
metadata_cpy)
if result.modified_count is None:
logger.warn('Cannot determine if document has been modified since `result.modified_count` has the value `None`. `result.matched_count` is %s' % result.matched_count)
elif result.modified_count != 1:
logger.warn('updated %s files with id %r',
result.modified_count, metadata_id)
raise Exception('did not update')
@run_on_executor
def delete_file(self, filters):
if 'mongo_id' in filters:
filters['_id'] = filters['mongo_id']
del filters['mongo_id']
if '_id' in filters and not isinstance(filters['_id'], dict):
filters['_id'] = ObjectId(filters['_id'])
result = self.client.files.delete_one(filters)
if result.deleted_count != 1:
            logger.warn('deleted %d files with filter %r',
                        result.deleted_count, filters)
raise Exception('did not delete')
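# Example (sketch): using the executor-backed client from a Tornado coroutine.
# Each method is wrapped with run_on_executor, so it returns a future that can
# be yielded. The host and metadata values are illustrative.
from tornado import gen
@gen.coroutine
def _example_usage():
    db = Mongo(host='localhost:27017')
    mongo_id = yield db.create_file({'uid': 'abc123', 'logical_name': '/data/file1'})
    files = yield db.find_files({'uid': 'abc123'}, limit=10, start=0)
    yield db.update_file({'mongo_id': mongo_id, 'backup_location': 'tape-42'})
    yield db.delete_file({'mongo_id': mongo_id})
    raise gen.Return(files)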
| {
"repo_name": "JadonKrys/file_catalog",
"path": "file_catalog/mongo.py",
"copies": "1",
"size": "5024",
"license": "mit",
"hash": 407177369162005000,
"line_mean": 33.4109589041,
"line_max": 177,
"alpha_frac": 0.5700636943,
"autogenerated": false,
"ratio": 4.104575163398692,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5174638857698692,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import logging
import cloudpickle
from mesos.interface import mesos_pb2
from .proxies.messages import (CommandInfo, ContainerInfo, Cpus, Disk,
DockerInfo, Environment, ExecutorInfo, Mem,
TaskInfo, TaskStatus)
from .utils import remote_exception
class PickleMixin(object):
@property
def data(self):
return cloudpickle.loads(self['data'])
@data.setter
def data(self, value):
self['data'] = cloudpickle.dumps(value)
class PythonTaskStatus(PickleMixin, TaskStatus):
proto = mesos_pb2.TaskStatus(
labels=mesos_pb2.Labels(
labels=[mesos_pb2.Label(key='python')]))
def __init__(self, data=None, **kwargs):
super(PythonTaskStatus, self).__init__(**kwargs)
self.data = data
@property
def exception(self):
try:
return remote_exception(*self.data)
        except Exception:
return None
class PythonTask(PickleMixin, TaskInfo):
proto = mesos_pb2.TaskInfo(
labels=mesos_pb2.Labels(
labels=[mesos_pb2.Label(key='python')]))
def __init__(self, fn=None, args=[], kwargs={},
resources=[Cpus(0.1), Mem(128), Disk(0)],
command='python -m mentor.executor', envs={}, uris=[],
docker='daskos/mentor:latest', force_pull=False, retries=3,
**kwds):
super(PythonTask, self).__init__(**kwds)
self.status = PythonTaskStatus(task_id=self.id, state='TASK_STAGING')
self.executor = ExecutorInfo(
container=ContainerInfo(type='DOCKER',
docker=DockerInfo(network='HOST')),
command=CommandInfo(shell=True))
self.data = (fn, args, kwargs)
self.envs = envs
self.uris = uris
self.docker = docker
self.force_pull = force_pull
self.command = command
self.resources = resources
self.retries = retries
self.attempt = 1
@property
def uris(self):
return [uri.value for uri in self.executor.command.uris]
@uris.setter
def uris(self, value):
self.executor.command.uris = [{'value': v} for v in value]
@property
def envs(self):
envs = self.executor.command.environment.variables
return {env.name: env.value for env in envs}
@envs.setter
def envs(self, value):
envs = [{'name': k, 'value': v} for k, v in value.items()]
self.executor.command.environment = Environment(variables=envs)
@property
def command(self):
return self.executor.command.value
@command.setter
def command(self, value):
self.executor.command.value = value
@property
def docker(self):
return self.executor.container.docker.image
@docker.setter
def docker(self, value):
self.executor.container.docker.image = value
@property
def force_pull(self):
return self.executor.container.docker.force_pull_image
@force_pull.setter
def force_pull(self, value):
self.executor.container.docker.force_pull_image = value
def __call__(self):
fn, args, kwargs = self.data
return fn(*args, **kwargs)
def retry(self, status):
if self.attempt < self.retries:
logging.info('Task {} attempt #{} rescheduled due to failure with state '
'{} and message {}'.format(self.id, self.attempt,
status.state, status.message))
self.attempt += 1
status.state = 'TASK_STAGING'
else:
            logging.error('Aborting: task {} failed after {} attempts, ending in state '
'{} with message {}'.format(self.id, self.retries,
status.state, status.message))
raise RuntimeError('Task {} failed with state {} and message {}'.format(
self.id, status.state, status.message))
def update(self, status):
assert isinstance(status, TaskStatus)
self.on_update(status)
if status.has_succeeded():
self.on_success(status)
elif status.has_failed():
self.on_fail(status)
def on_update(self, status):
self.status = status # update task's status
logging.info('Task {} has been updated with state {}'.format(
self.id.value, status.state))
def on_success(self, status):
        logging.info('Task {} has succeeded'.format(self.id.value))
def on_fail(self, status):
        logging.error('Task {} has failed with state {} due to {}'.format(
self.id.value, status.state, status.message))
try:
raise status.exception # won't retry due to code error in PythonTaskStatus
except KeyError as e:
# not a code error, e.g. problem during deployment
self.retry(status)
else:
logging.error('Aborting due to task {} failed with state {} and message '
'{}'.format(self.id, status.state, status.message))
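# Example (sketch): the PickleMixin round-trip in isolation. PythonTask and
# PythonTaskStatus get the same behaviour through the item protocol of their
# message base classes; the dict-backed container here is only for
# illustration.
class _PickleBox(PickleMixin, dict):
    pass
def _example_roundtrip():
    box = _PickleBox()
    box.data = (sum, ([1, 2, 3],), {})  # (fn, args, kwargs), as in PythonTask
    fn, args, kwargs = box.data
    return fn(*args, **kwargs)  # == 6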
| {
"repo_name": "lensacom/satyr",
"path": "mentor/messages.py",
"copies": "1",
"size": "5225",
"license": "apache-2.0",
"hash": -4975351999436428000,
"line_mean": 32.2802547771,
"line_max": 87,
"alpha_frac": 0.5831578947,
"autogenerated": false,
"ratio": 4.143536875495639,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5226694770195639,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import logging
import numpy as np
import os
import time
import joblib
from joblib import Parallel, delayed
from TotalActivation.filters import hrf
from TotalActivation.process.temporal import wiener
from TotalActivation.process.spatial import tikhonov
from TotalActivation.preprocess.input import load_nifti, load_nifti_nomask, load_matlab_data, load_text_data
from TotalActivation.process.parallel import parallel_temporalTA
logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.DEBUG)
__all__ = ["TotalActivation"]
class TotalActivation(object):
def __init__(self, method_time='B', method_space='T', hrf='bold', Lambda=1 / 0.8095, cost_save=False):
# Method_time: 'B', 'S' or 'W'
# Method_space: 'S', 'T', None
# HRF: 'bold', 'spmhrf'
# Input parameters
self.method_time = method_time
self.method_space = method_space
self.hrf = hrf
self.Lambda = Lambda
self.cost_save = cost_save
self.n_jobs = -2
self.n_iter = 5
# Empty fields
self.data = None
self.atlas = None
self.deconvolved_ = None
def _load_data(self, f, a=None, mask=True, ftype='nifti', detrend=True, standardize=True, highpass=0.01,
lowpass=None,
TR=2):
"""
Wrapper for loading all kinds of data
:return: data or data + atlas in 2D
"""
        if ftype == 'nifti':
if mask is False:
cmd = load_nifti_nomask
else:
cmd = load_nifti
self.data, self.data_masker, self.atlas, self.atlas_masker = cmd(f, a, mask, detrend=detrend,
standardize=standardize,
highpass=highpass,
lowpass=lowpass, TR=TR)
        elif ftype == 'mat':
            self.data, self.atlas = load_matlab_data(f, a)
        elif ftype == 'txt':
self.data = load_text_data(f)
self.atlas = None
else:
raise ValueError("Data type not supported. Valid options are 'nifti', 'mat' or 'txt'")
self.n_voxels = self.data.shape[1]
self.n_tp = self.data.shape[0]
self.TR = TR
self._get_hrf_parameters()
def _get_hrf_parameters(self):
"""
Prepares a field with HRF parameters
:return:
"""
if self.hrf == 'bold':
a, psi = hrf.bold_parameters()
elif self.hrf == 'spmhrf':
a, psi = hrf.spmhrf_parameters()
else:
raise ValueError("HRF must be either bold or spmhrf")
        if self.method_time == 'B':
            self.hrfparams = hrf.block_filter(a, psi, self.TR)
            self.t_iter = 500
        elif self.method_time == 'S':
            self.hrfparams = hrf.spike_filter(a, psi, self.TR)
            self.t_iter = 200
        elif self.method_time == 'W':
self.hrfparams = hrf.block_filter(a, psi, self.TR)
self.t_iter = 1
else:
raise ValueError('Method_time has to be B, S or W')
def _temporal(self, d):
"""
Temporal regularization.
"""
assert d is not None, "Cannot run anything without loaded data!"
        if self.method_time == 'B' or self.method_time == 'S':
# _, coef = pywt.wavedec(d, 'db3', level=1, axis=0)
# lambda_temp = mad(coef) * self.config['Lambda']
voxels = np.arange(self.n_voxels)
tempmem = np.memmap('temp.mmap', dtype=float, shape=(self.n_tp, self.n_voxels), mode="w+")
if self.n_jobs < 0:
n_splits = joblib.cpu_count() + self.n_jobs + 1
else:
n_splits = self.n_jobs
Parallel(n_jobs=self.n_jobs)(
delayed(parallel_temporalTA)(d, tempmem, x, self.Lambda, self.hrfparams[0], self.hrfparams[2],
self.n_tp, self.t_iter, self.cost_save)
for x in np.array_split(voxels, n_splits))
self.deconvolved_ = tempmem
        elif self.method_time == 'W':
self.deconvolved_ = wiener(d, self.hrfparams[0], self.Lambda, self.n_voxels, self.n_tp)
else:
print("Wrong temporal deconvolution method; must be B, S or W")
def _spatial(self, d, a):
"""
Spatial regularization.
"""
assert a is not None, "Cannot run spatial regularization without the atlas!"
        if self.method_space == 'T':
self.deconvolved_ = tikhonov(d, a, self.data_masker, iter=self.s_iter)
else:
print("This spatial regularization method is not yet implemented")
def _deconvolve(self):
"""
Main method for deconvolution
:return:
"""
if self.method_space is None:
print("Temporal regularization...")
self.t_iter *= 5
t0 = time.time()
self._temporal(self.data)
print("Done in %d seconds!" % (time.time() - t0))
        elif self.method_space == 'S':
            print("Structured sparsity spatial regularization not yet implemented")
        elif self.method_space == 'T':
self.s_iter = 100
TC_OUT = np.zeros_like(self.data)
xT = np.zeros_like(self.data)
xS = np.zeros_like(self.data)
t0 = time.time()
k = 0
while k < self.n_iter:
print("Iteration %d of %d" % (k + 1, self.n_iter))
print("Temporal...")
self._temporal(TC_OUT - xT + self.data)
xT += self.deconvolved_ - TC_OUT
print("Spatial...")
self._spatial(TC_OUT, TC_OUT - xS + self.data)
xS += self.deconvolved_ - TC_OUT
TC_OUT = 0.5 * xT + 0.5 * xS
k += 1
self.deconvolved_ = TC_OUT
print("Done in %d seconds!" % (time.time() - t0))
else:
raise ValueError("method_space must be S, T or None")
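# Example (sketch): a typical block-regularization run. The file names, atlas
# and TR below are illustrative; method_space=None would skip the spatial step.
def _example_run(fmri_file='func.nii.gz', atlas_file='atlas.nii.gz'):
    ta = TotalActivation(method_time='B', method_space='T', hrf='bold')
    ta._load_data(fmri_file, a=atlas_file, ftype='nifti', TR=2.0)
    ta._deconvolve()
    return ta.deconvolved_  # time points x voxels array of deconvolved signals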
if __name__ == '__main__':
ta = TotalActivation()
| {
"repo_name": "kjurek/pyTotalActivation",
"path": "TotalActivation/TotalActivation.py",
"copies": "1",
"size": "6350",
"license": "mit",
"hash": 3232096165890489000,
"line_mean": 33.8901098901,
"line_max": 110,
"alpha_frac": 0.528976378,
"autogenerated": false,
"ratio": 3.6961583236321305,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47251347016321305,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import logging
import numpy as np
import scipy.io as sio
import time
import joblib
from joblib import Parallel, delayed
from TotalActivation.filters import hrf
from TotalActivation.process.temporal import wiener
from TotalActivation.process.spatial import tikhonov
from TotalActivation.preprocess.input import load_nifti, load_nifti_nomask, load_matlab_data, load_text_data
from TotalActivation.process.parallel import parallel_temporalTA
logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.DEBUG)
__all__ = ["TotalActivation"]
class TotalActivation(object):
def __init__(self, method_time='B', method_space='T', hrf='bold', Lambda=1 / 0.8095, cost_save=False):
# Method_time: 'B', 'S' or 'W'
# Method_space: 'S', 'T', None
# HRF: 'bold', 'spmhrf'
# Input parameters
self.method_time = method_time
self.method_space = method_space
self.hrf = hrf
self.Lambda = Lambda
self.cost_save = cost_save
self.n_jobs = -2
self.n_iter = 5
# Empty fields
self.data = None
self.atlas = None
self.deconvolved_ = None
def _load_data(self, f, a=None, mask=True, ftype='nifti', detrend=True, standardize=True, highpass=0.01,
lowpass=None,
TR=2):
"""
Wrapper for loading all kinds of data
:return: data or data + atlas in 2D
"""
        if ftype == 'nifti':
if mask is True:
cmd = load_nifti
elif mask is False:
cmd = load_nifti_nomask
self.data, self.data_masker, self.atlas, self.atlas_masker = cmd(f, a, detrend=detrend,
standardize=standardize,
highpass=highpass,
lowpass=lowpass, TR=TR)
        elif ftype == 'mat':
            self.data, self.atlas = load_matlab_data(f, a)
        elif ftype == 'txt':
self.data = load_text_data(f)
self.atlas = None
else:
raise ValueError("Data type not supported. Valid options are 'nifti', 'mat' or 'txt'")
self.n_voxels = self.data.shape[1]
self.n_tp = self.data.shape[0]
self.TR = TR
self._get_hrf_parameters()
def _get_hrf_parameters(self):
"""
Prepares a field with HRF parameters
:return:
"""
if self.hrf == 'bold':
a, psi = hrf.bold_parameters()
elif self.hrf == 'spmhrf':
a, psi = hrf.spmhrf_parameters()
else:
raise ValueError("HRF must be either bold or spmhrf")
        if self.method_time == 'B':
            self.hrfparams = hrf.block_filter(a, psi, self.TR)
            self.t_iter = 500
        elif self.method_time == 'S':
            self.hrfparams = hrf.spike_filter(a, psi, self.TR)
            self.t_iter = 200
        elif self.method_time == 'W':
self.hrfparams = hrf.block_filter(a, psi, self.TR)
self.t_iter = 1
else:
raise ValueError('Method_time has to be B, S or W')
def _temporal(self, d):
"""
Temporal regularization.
"""
assert d is not None, "Cannot run anything without loaded data!"
        if self.method_time == 'B' or self.method_time == 'S':
# _, coef = pywt.wavedec(d, 'db3', level=1, axis=0)
# lambda_temp = mad(coef) * self.config['Lambda']
voxels = np.arange(self.n_voxels)
tempmem = np.memmap('temp.mmap', dtype=float, shape=(self.n_tp, self.n_voxels), mode="w+")
if self.n_jobs < 0:
n_splits = joblib.cpu_count() + self.n_jobs + 1
else:
n_splits = self.n_jobs
Parallel(n_jobs=self.n_jobs)(
delayed(parallel_temporalTA)(d, tempmem, x, self.Lambda, self.hrfparams[0], self.hrfparams[2],
self.n_tp, self.t_iter, self.cost_save)
for x in np.array_split(voxels, n_splits))
self.deconvolved_ = tempmem
        elif self.method_time == 'W':
self.deconvolved_ = wiener(d, self.hrfparams[0], self.Lambda, self.n_voxels, self.n_tp)
else:
print("Wrong temporal deconvolution method; must be B, S or W")
def _spatial(self, d, a):
"""
Spatial regularization.
"""
assert a is not None, "Cannot run spatial regularization without the atlas!"
        if self.method_space == 'T':
self.deconvolved_ = tikhonov(d, a, self.data_masker, iter=self.s_iter)
else:
print("This spatial regularization method is not yet implemented")
def _deconvolve(self):
"""
Main method for deconvolution
:return:
"""
if self.method_space is None:
print("Temporal regularization...")
self.t_iter *= 5
t0 = time.time()
self._temporal(self.data)
print("Done in %d seconds!" % (time.time() - t0))
        elif self.method_space == 'S':
            print("Structured sparsity spatial regularization not yet implemented")
        elif self.method_space == 'T':
self.s_iter = 100
TC_OUT = np.zeros_like(self.data)
xT = np.zeros_like(self.data)
xS = np.zeros_like(self.data)
t0 = time.time()
k = 0
while k < self.n_iter:
print("Iteration %d of %d" % (k + 1, self.n_iter))
print("Temporal...")
self._temporal(TC_OUT - xT + self.data)
xT += self.deconvolved_ - TC_OUT
print("Spatial...")
self._spatial(TC_OUT, TC_OUT - xS + self.data)
xS += self.deconvolved_ - TC_OUT
TC_OUT = 0.5 * xT + 0.5 * xS
k += 1
self.deconvolved_ = TC_OUT
print("Done in %d seconds!" % (time.time() - t0))
else:
raise ValueError("method_space must be S, T or None")
if __name__ == '__main__':
ta = TotalActivation()
| {
"repo_name": "mfalkiewicz/pyTotalActivation",
"path": "TotalActivation/TotalActivation.py",
"copies": "1",
"size": "6370",
"license": "mit",
"hash": 1589900313602606000,
"line_mean": 34,
"line_max": 110,
"alpha_frac": 0.5298273155,
"autogenerated": false,
"ratio": 3.697040046430644,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9718970371454599,
"avg_score": 0.0015793980952089046,
"num_lines": 182
} |
from __future__ import absolute_import, division, print_function
import logging
import numpy as np
__all__ = ['Coordinates', 'WCSCoordinates']
class Coordinates(object):
'''
Base class for coordinate transformation
'''
def __init__(self):
pass
def pixel2world(self, *args):
return args
def world2pixel(self, *args):
return args
def axis_label(self, axis):
return "World %i" % axis
    def dependent_axes(self, axis):
        """Return a tuple of which world axes are non-independent
        of a given pixel axis.
        The axis index is given in the numpy ordering convention (note that
        this is the opposite of the FITS convention).
"""
return (axis,)
def __gluestate__(self, context):
return {} # no state
@classmethod
def __setgluestate__(cls, rec, context):
return cls()
class WCSCoordinates(Coordinates):
'''
Class for coordinate transformation based on the WCS FITS
standard. This class does not take into account
distortions.
References
----------
* Greisen & Calabretta (2002), Astronomy and Astrophysics, 395, 1061
* Calabretta & Greisen (2002), Astronomy and Astrophysics, 395, 1077
* Greisen, Calabretta, Valdes & Allen (2006), Astronomy and
Astrophysics, 446, 747
'''
def __init__(self, header, wcs=None):
super(WCSCoordinates, self).__init__()
from ..external.astro import WCS
self._header = header
wcs = wcs or WCS(header)
# update WCS interface if using old API
mapping = {'wcs_pix2world': 'wcs_pix2sky',
'wcs_world2pix': 'wcs_sky2pix',
'all_pix2world': 'all_pix2sky'}
for k, v in mapping.items():
if not hasattr(wcs, k):
setattr(wcs, k, getattr(wcs, v))
self._wcs = wcs
@property
def wcs(self):
return self._wcs
@property
def header(self):
return self._header
def dependent_axes(self, axis):
# if distorted, all bets are off
try:
if any([self._wcs.sip, self._wcs.det2im1, self._wcs.det2im2]):
return tuple(range(self._wcs.naxis))
except AttributeError:
pass
# here, axis is the index number in numpy convention
# we flip with [::-1] because WCS and numpy index
# conventions are reversed
pc = np.array(self._wcs.wcs.get_pc()[::-1, ::-1])
ndim = pc.shape[0]
pc[np.eye(ndim, dtype=np.bool)] = 0
axes = self._wcs.get_axis_types()[::-1]
# axes rotated
if pc[axis, :].any() or pc[:, axis].any():
return tuple(range(ndim))
# XXX can spectral still couple with other axes by this point??
if axes[axis].get('coordinate_type') != 'celestial':
return (axis,)
# in some cases, even the celestial coordinates are
# independent. We don't catch that here.
return tuple(i for i, a in enumerate(axes) if
a.get('coordinate_type') == 'celestial')
def __setstate__(self, state):
self.__dict__ = state
# wcs object doesn't seem to unpickle properly. reconstruct it
from ..external.astro import WCS
self._wcs = WCS(self._header)
def pixel2world(self, *pixel):
'''
Convert pixel to world coordinates, preserving input type/shape
:param args: xpix, ypix[, zpix]: scalars, lists, or Numpy arrays
The pixel coordinates to convert
*Returns*
xworld, yworld, [zworld]: scalars, lists or Numpy arrays
The corresponding world coordinates
'''
arrs = [np.asarray(p) for p in pixel]
pix = np.vstack(a.ravel() for a in arrs).T
result = tuple(self._wcs.wcs_pix2world(pix, 0).T)
for r, a in zip(result, arrs):
r.shape = a.shape
return result
def world2pixel(self, *world):
'''
Convert pixel to world coordinates, preserving input type/shape
:param world:
xworld, yworld[, zworld] : scalars, lists or Numpy arrays
The world coordinates to convert
*Returns*
xpix, ypix: scalars, lists, or Numpy arrays
The corresponding pixel coordinates
'''
arrs = [np.asarray(w) for w in world]
pix = np.vstack(a.ravel() for a in arrs).T
result = tuple(self._wcs.wcs_world2pix(pix, 0).T)
for r, a in zip(result, arrs):
r.shape = a.shape
return result
def axis_label(self, axis):
header = self._header
ndim = _get_ndim(header)
num = _get_ndim(header) - axis # number orientation reversed
ax = self._header.get('CTYPE%i' % num)
if ax is not None:
if len(ax) == 8 or '-' in ax: # assume standard format
ax = ax[:5].split('-')[0].title()
else:
ax = ax.title()
translate = dict(
Glon='Galactic Longitude',
Glat='Galactic Latitude',
Ra='Right Ascension',
Dec='Declination',
Velo='Velocity',
Freq='Frequency'
)
return translate.get(ax, ax)
return super(WCSCoordinates, self).axis_label(axis)
def __gluestate__(self, context):
return dict(header=self._wcs.to_header_string())
@classmethod
def __setgluestate__(cls, rec, context):
from ..external.astro import fits
return cls(fits.Header.fromstring(rec['header']))
def coordinates_from_header(header):
""" Convert a FITS header into a glue Coordinates object
:param header: Header to convert
:type header: :class:`astropy.io.fits.Header`
:rtype: :class:`~glue.core.coordinates.Coordinates`
"""
try:
return WCSCoordinates(header)
except Exception as e:
        logging.getLogger(__name__).warn("\n\n*******************************\n"
                                         "Encountered an error during WCS parsing. "
"Discarding world coordinates! "
"\n%s\n"
"*******************************\n\n" % e
)
return Coordinates()
def _get_ndim(header):
if 'NAXIS' in header:
return header['NAXIS']
if 'WCSAXES' in header:
return header['WCSAXES']
return None
def coordinates_from_wcs(wcs):
"""Convert a wcs object into a glue Coordinates object
:param wcs: The WCS object to use
:rtype: :class:`~glue.core.coordinates.Coordinates`
"""
from ..external.astro import fits
hdr_str = wcs.wcs.to_header()
hdr = fits.Header.fromstring(hdr_str)
try:
return WCSCoordinates(hdr, wcs)
except (AttributeError, TypeError) as e:
print(e)
return Coordinates()
def header_from_string(string):
"""
Convert a string to a FITS header
"""
from ..external.astro import fits
cards = []
for s in string.splitlines():
try:
l, r = s.split('=')
key = l.strip()
value = r.split('/')[0].strip()
try:
value = int(value)
except ValueError:
pass
except ValueError:
continue
cards.append(fits.Card(key, value))
return fits.Header(cards)
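# Example (sketch): building coordinates from a minimal celestial header and
# converting between pixel and world positions. The header values are made up;
# a header without usable WCS keywords would fall back to plain Coordinates.
def _example_coordinates():
    from ..external.astro import fits
    hdr = fits.Header()
    hdr['NAXIS'] = 2
    hdr['CTYPE1'], hdr['CRVAL1'], hdr['CRPIX1'], hdr['CDELT1'] = 'RA---TAN', 10.0, 1.0, -0.01
    hdr['CTYPE2'], hdr['CRVAL2'], hdr['CRPIX2'], hdr['CDELT2'] = 'DEC--TAN', 20.0, 1.0, 0.01
    coords = coordinates_from_header(hdr)
    ra, dec = coords.pixel2world([0.0, 1.0, 2.0], [0.0, 0.0, 0.0])
    xpix, ypix = coords.world2pixel(ra, dec)
    return ra, dec, xpix, ypix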
| {
"repo_name": "JudoWill/glue",
"path": "glue/core/coordinates.py",
"copies": "1",
"size": "7531",
"license": "bsd-3-clause",
"hash": -2575206565231598600,
"line_mean": 28.8849206349,
"line_max": 82,
"alpha_frac": 0.5519851281,
"autogenerated": false,
"ratio": 3.9783412572636028,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5030326385363603,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import logging
import numpy as np
from glue.utils import unbroadcast, broadcast_to
__all__ = ['Coordinates', 'WCSCoordinates', 'coordinates_from_header', 'coordinates_from_wcs']
class Coordinates(object):
'''
Base class for coordinate transformation
'''
def __init__(self):
pass
def pixel2world(self, *args):
"""
Convert pixel to world coordinates, preserving input type/shape.
Parameters
----------
*pixel : scalars lists, or Numpy arrays
The pixel coordinates (0-based) to convert
Returns
-------
*world : Numpy arrays
The corresponding world coordinates
"""
return args
def world2pixel(self, *args):
"""
Convert world to pixel coordinates, preserving input type/shape.
Parameters
----------
*world : scalars lists, or Numpy arrays
The world coordinates to convert
Returns
-------
*pixel : Numpy arrays
The corresponding pixel coordinates
"""
return args
# PY3: pixel2world_single_axis(self, *pixel, axis=None)
def pixel2world_single_axis(self, *pixel, **kwargs):
"""
Convert pixel to world coordinates, preserving input type/shape.
This is a wrapper around pixel2world which returns the result for just
one axis, and also determines whether the calculation can be sped up
if broadcasting is present in the input arrays.
Parameters
----------
*pixel : scalars lists, or Numpy arrays
The pixel coordinates (0-based) to convert
axis : int, optional
If only one axis is needed, it should be specified since the
calculation will be much more efficient.
Returns
-------
world : `numpy.ndarray`
The world coordinates for the requested axis
"""
# PY3: the following is needed for Python 2
axis = kwargs.get('axis', None)
if axis is None:
raise ValueError("axis needs to be set")
original_shape = pixel[0].shape
pixel_new = []
dep_axes = self.dependent_axes(axis)
for ip, p in enumerate(pixel):
if ip in dep_axes:
pixel_new.append(unbroadcast(p))
else:
pixel_new.append(p.flat[0])
pixel = np.broadcast_arrays(*pixel_new)
result = self.pixel2world(*pixel)
return broadcast_to(result[axis], original_shape)
def world2pixel_single_axis(self, *world, **kwargs):
"""
Convert world to pixel coordinates, preserving input type/shape.
This is a wrapper around world2pixel which returns the result for just
one axis, and also determines whether the calculation can be sped up
if broadcasting is present in the input arrays.
Parameters
----------
*world : scalars lists, or Numpy arrays
The world coordinates to convert
axis : int, optional
If only one axis is needed, it should be specified since the
calculation will be much more efficient.
Returns
-------
pixel : `numpy.ndarray`
The pixel coordinates for the requested axis
"""
# PY3: the following is needed for Python 2
axis = kwargs.get('axis', None)
if axis is None:
raise ValueError("axis needs to be set")
original_shape = world[0].shape
world_new = []
dep_axes = self.dependent_axes(axis)
for iw, w in enumerate(world):
if iw in dep_axes:
world_new.append(unbroadcast(w))
else:
world_new.append(w.flat[0])
world = np.broadcast_arrays(*world_new)
result = self.world2pixel(*world)
return broadcast_to(result[axis], original_shape)
def world_axis(self, data, axis):
"""
Find the world coordinates along a given dimension, and which for now
we center on the pixel origin.
Parameters
----------
data : `~glue.core.data.Data`
The data to compute the coordinate axis for (this is used to
determine the size of the axis)
axis : int
The axis to compute, in Numpy axis order
Notes
-----
This method computes the axis values using pixel positions at the
center of the data along all other axes. This will therefore only give
the correct result for non-dependent axes (which can be checked using
the ``dependent_axes`` method).
"""
pixel = []
for i, s in enumerate(data.shape):
if i == axis:
pixel.append(np.arange(data.shape[axis]))
else:
pixel.append(np.repeat((s - 1) / 2, data.shape[axis]))
return self.pixel2world_single_axis(*pixel[::-1],
axis=data.ndim - 1 - axis)
def world_axis_unit(self, axis):
"""
Return the unit of the world coordinate given by ``axis`` (assuming the
Numpy axis order)
"""
return ''
def axis_label(self, axis):
return "World {}".format(axis)
    def dependent_axes(self, axis):
        """Return a tuple of which world axes are non-independent
        of a given pixel axis.
        The axis index is given in the numpy ordering convention (note that
        this is the opposite of the FITS convention).
"""
return (axis,)
def __gluestate__(self, context):
return {} # no state
@classmethod
def __setgluestate__(cls, rec, context):
return cls()
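# Example (sketch): single-axis conversion on a broadcast pixel grid. With the
# identity transform above the result simply echoes the y pixel grid, but the
# unbroadcast/broadcast_to handling means only the dependent axis is evaluated
# at full resolution.
def _example_single_axis():
    coords = Coordinates()
    ypix, xpix = np.meshgrid(np.arange(3), np.arange(4), indexing='ij')
    world_y = coords.pixel2world_single_axis(xpix, ypix, axis=1)
    assert world_y.shape == (3, 4)
    return world_y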
class WCSCoordinates(Coordinates):
'''
Class for coordinate transformation based on the WCS FITS
standard. This class does not take into account
distortions.
Parameters
----------
header : :class:`astropy.io.fits.Header`
FITS header (derived from WCS if not given)
wcs : :class:`astropy.wcs.WCS`
WCS object to use, if different from header
References
----------
* Greisen & Calabretta (2002), Astronomy and Astrophysics, 395, 1061
* Calabretta & Greisen (2002), Astronomy and Astrophysics, 395, 1077
* Greisen, Calabretta, Valdes & Allen (2006), Astronomy and
Astrophysics, 446, 747
'''
def __init__(self, header=None, wcs=None):
super(WCSCoordinates, self).__init__()
from astropy.wcs import WCS
if header is None and wcs is None:
raise ValueError('Must provide either FITS header or WCS or both')
if header is None:
header = wcs.to_header()
self._header = header
try:
naxis = header['NAXIS']
except (KeyError, TypeError):
naxis = None
wcs = wcs or WCS(header, naxis=naxis)
# update WCS interface if using old API
mapping = {'wcs_pix2world': 'wcs_pix2sky',
'wcs_world2pix': 'wcs_sky2pix',
'all_pix2world': 'all_pix2sky'}
for k, v in mapping.items():
if not hasattr(wcs, k):
setattr(wcs, k, getattr(wcs, v))
self._wcs = wcs
def world_axis_unit(self, axis):
return str(self._wcs.wcs.cunit[self._wcs.naxis - 1 - axis])
@property
def wcs(self):
return self._wcs
@property
def header(self):
return self._header
def dependent_axes(self, axis):
# TODO: we should cache this
# if distorted, all bets are off
try:
if any([self._wcs.sip, self._wcs.det2im1, self._wcs.det2im2]):
return tuple(range(self._wcs.naxis))
except AttributeError:
pass
# here, axis is the index number in numpy convention
# we flip with [::-1] because WCS and numpy index
# conventions are reversed
pc = np.array(self._wcs.wcs.get_pc()[::-1, ::-1])
ndim = pc.shape[0]
        pc[np.eye(ndim, dtype=bool)] = 0  # zero the diagonal; only cross-axis terms remain
axes = self._wcs.get_axis_types()[::-1]
# axes rotated
if pc[axis, :].any() or pc[:, axis].any():
return tuple(range(ndim))
# XXX can spectral still couple with other axes by this point??
if axes[axis].get('coordinate_type') != 'celestial':
return (axis,)
# in some cases, even the celestial coordinates are
# independent. We don't catch that here.
return tuple(i for i, a in enumerate(axes) if
a.get('coordinate_type') == 'celestial')
def __setstate__(self, state):
self.__dict__ = state
# wcs object doesn't seem to unpickle properly. reconstruct it
from astropy.wcs import WCS
try:
naxis = self._header['NAXIS']
except (KeyError, TypeError):
naxis = None
self._wcs = WCS(self._header, naxis=naxis)
def pixel2world(self, *pixel):
# PY3: can just do pix2world(*pixel, 0)
return self._wcs.wcs_pix2world(*(tuple(pixel) + (0,)))
def world2pixel(self, *world):
# PY3: can just do world2pix(*world, 0)
return self._wcs.wcs_world2pix(*(tuple(world) + (0,)))
def axis_label(self, axis):
header = self._header
num = _get_ndim(header) - axis # number orientation reversed
ax = self._header.get('CTYPE%i' % num)
if ax is not None:
if len(ax) == 8 or '-' in ax: # assume standard format
ax = ax[:5].split('-')[0].title()
else:
ax = ax.title()
translate = dict(
Glon='Galactic Longitude',
Glat='Galactic Latitude',
Ra='Right Ascension',
Dec='Declination',
Velo='Velocity',
Freq='Frequency'
)
return translate.get(ax, ax)
return super(WCSCoordinates, self).axis_label(axis)
def __gluestate__(self, context):
return dict(header=self._wcs.to_header_string())
@classmethod
def __setgluestate__(cls, rec, context):
from astropy.io import fits
return cls(fits.Header.fromstring(rec['header']))
def coordinates_from_header(header):
"""
Convert a FITS header into a glue Coordinates object.
Parameters
----------
header : :class:`astropy.io.fits.Header`
Header to convert
Returns
-------
coordinates : :class:`~glue.core.coordinates.Coordinates`
"""
# We check whether the header contains at least CRVAL1 - if not, we would
# end up with a default WCS that isn't quite 1 to 1 (because of a 1-pixel
# offset) so better use Coordinates in that case.
from astropy.io.fits import Header
if isinstance(header, Header) and 'CRVAL1' in header:
try:
return WCSCoordinates(header)
except Exception as e:
            logging.getLogger(__name__).warning(
                "\n\n*******************************\n"
                "Encountered an error during WCS parsing. "
                "Discarding world coordinates! "
                "\n{}\n"
                "*******************************\n\n".format(str(e)))
return Coordinates()
def _get_ndim(header):
if 'NAXIS' in header:
return header['NAXIS']
if 'WCSAXES' in header:
return header['WCSAXES']
return None
def coordinates_from_wcs(wcs):
"""
Convert an Astropy WCS object into a glue Coordinates object.
Parameters
----------
wcs : :class:`astropy.wcs.WCS`
The WCS object to use
Returns
-------
coordinates : :class:`~glue.core.coordinates.Coordinates`
"""
from astropy.io import fits
hdr_str = wcs.wcs.to_header()
hdr = fits.Header.fromstring(hdr_str)
try:
return WCSCoordinates(hdr, wcs)
except (AttributeError, TypeError) as e:
print(e)
return Coordinates()
def header_from_string(string):
"""
Convert a string to a FITS header.
"""
from astropy.io import fits
return fits.Header.fromstring(string, sep='\n')
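# A minimal usage sketch: build a simple 2-D celestial header and round-trip a
# pixel position through the resulting WCSCoordinates. The header card values
# and the helper name _example_wcs_coordinates are made up for illustration.
def _example_wcs_coordinates():
    from astropy.io import fits
    header = fits.Header()
    header['NAXIS'] = 2
    header['NAXIS1'] = 10
    header['NAXIS2'] = 10
    header['CTYPE1'] = 'RA---TAN'
    header['CTYPE2'] = 'DEC--TAN'
    header['CRVAL1'] = 30.0   # reference world coordinates (deg)
    header['CRVAL2'] = 15.0
    header['CRPIX1'] = 5.0    # reference pixel
    header['CRPIX2'] = 5.0
    header['CDELT1'] = -0.01  # pixel scale (deg/pixel)
    header['CDELT2'] = 0.01
    coords = coordinates_from_header(header)  # -> WCSCoordinates
    ra, dec = coords.pixel2world(1.0, 3.0)
    x, y = coords.world2pixel(ra, dec)        # round-trips back to (1.0, 3.0)
    return x, y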
| {
"repo_name": "stscieisenhamer/glue",
"path": "glue/core/coordinates.py",
"copies": "2",
"size": "12260",
"license": "bsd-3-clause",
"hash": -4575898630275752400,
"line_mean": 29.1228501229,
"line_max": 94,
"alpha_frac": 0.5696574225,
"autogenerated": false,
"ratio": 4.180020456870099,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5749677879370099,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import logging
import numpy as np
__all__ = ['Coordinates', 'WCSCoordinates']
class Coordinates(object):
'''
Base class for coordinate transformation
'''
def __init__(self):
pass
def pixel2world(self, *args):
return args
def world2pixel(self, *args):
return args
def world_axis(self, data, axis):
"""
        Find the world coordinates along a given dimension, which for now we
        center on the pixel origin.
Parameters
----------
data : `~glue.core.data.Data`
The data to compute the coordinate axis for (this is used to
determine the size of the axis)
axis : int
The axis to compute, in Numpy axis order
Notes
-----
This method computes the axis values using pixel positions at the center
of the data along all other axes. This will therefore only give the
correct result for non-dependent axes (which can be checked using the
``dependent_axes`` method)
"""
pixel = []
for i, s in enumerate(data.shape):
if i == axis:
pixel.append(np.arange(data.shape[axis]))
else:
pixel.append(np.repeat((s - 1) / 2, data.shape[axis]))
return self.pixel2world(*pixel[::-1])[::-1][axis]
def world_axis_unit(self, axis):
"""
Return the unit of the world coordinate given by ``axis`` (assuming the
Numpy axis order)
"""
return ''
def axis_label(self, axis):
return "World %i" % axis
def dependent_axes(self, axis):
"""Return a tuple of which world-axes are non-indepndent
from a given pixel axis
The axis index is given in numpy ordering convention (note that
opposite the fits convention)
"""
return (axis,)
def __gluestate__(self, context):
return {} # no state
@classmethod
def __setgluestate__(cls, rec, context):
return cls()
class WCSCoordinates(Coordinates):
'''
Class for coordinate transformation based on the WCS FITS
standard. This class does not take into account
distortions.
References
----------
* Greisen & Calabretta (2002), Astronomy and Astrophysics, 395, 1061
* Calabretta & Greisen (2002), Astronomy and Astrophysics, 395, 1077
* Greisen, Calabretta, Valdes & Allen (2006), Astronomy and
Astrophysics, 446, 747
'''
def __init__(self, header, wcs=None):
super(WCSCoordinates, self).__init__()
from astropy.wcs import WCS
self._header = header
try:
naxis = header['NAXIS']
except (KeyError, TypeError):
naxis = None
wcs = wcs or WCS(header, naxis=naxis)
# update WCS interface if using old API
mapping = {'wcs_pix2world': 'wcs_pix2sky',
'wcs_world2pix': 'wcs_sky2pix',
'all_pix2world': 'all_pix2sky'}
for k, v in mapping.items():
if not hasattr(wcs, k):
setattr(wcs, k, getattr(wcs, v))
self._wcs = wcs
def world_axis_unit(self, axis):
return str(self._wcs.wcs.cunit[self._wcs.naxis - 1 - axis])
@property
def wcs(self):
return self._wcs
@property
def header(self):
return self._header
def dependent_axes(self, axis):
# if distorted, all bets are off
try:
if any([self._wcs.sip, self._wcs.det2im1, self._wcs.det2im2]):
return tuple(range(self._wcs.naxis))
except AttributeError:
pass
# here, axis is the index number in numpy convention
# we flip with [::-1] because WCS and numpy index
# conventions are reversed
pc = np.array(self._wcs.wcs.get_pc()[::-1, ::-1])
ndim = pc.shape[0]
        pc[np.eye(ndim, dtype=bool)] = 0  # zero the diagonal; only cross-axis terms remain
axes = self._wcs.get_axis_types()[::-1]
# axes rotated
if pc[axis, :].any() or pc[:, axis].any():
return tuple(range(ndim))
# XXX can spectral still couple with other axes by this point??
if axes[axis].get('coordinate_type') != 'celestial':
return (axis,)
# in some cases, even the celestial coordinates are
# independent. We don't catch that here.
return tuple(i for i, a in enumerate(axes) if
a.get('coordinate_type') == 'celestial')
def __setstate__(self, state):
self.__dict__ = state
# wcs object doesn't seem to unpickle properly. reconstruct it
from astropy.wcs import WCS
try:
naxis = self._header['NAXIS']
except (KeyError, TypeError):
naxis = None
self._wcs = WCS(self._header, naxis=naxis)
def pixel2world(self, *pixel):
'''
Convert pixel to world coordinates, preserving input type/shape
:param args: xpix, ypix[, zpix]: scalars, lists, or Numpy arrays
The pixel coordinates to convert
*Returns*
xworld, yworld, [zworld]: scalars, lists or Numpy arrays
The corresponding world coordinates
'''
arrs = [np.asarray(p) for p in pixel]
        pix = np.vstack([a.ravel() for a in arrs]).T
result = tuple(self._wcs.wcs_pix2world(pix, 0).T)
for r, a in zip(result, arrs):
r.shape = a.shape
return result
def world2pixel(self, *world):
'''
        Convert world to pixel coordinates, preserving input type/shape
:param world:
xworld, yworld[, zworld] : scalars, lists or Numpy arrays
The world coordinates to convert
*Returns*
xpix, ypix: scalars, lists, or Numpy arrays
The corresponding pixel coordinates
'''
arrs = [np.asarray(w) for w in world]
        pix = np.vstack([a.ravel() for a in arrs]).T
result = tuple(self._wcs.wcs_world2pix(pix, 0).T)
for r, a in zip(result, arrs):
r.shape = a.shape
return result
def axis_label(self, axis):
header = self._header
ndim = _get_ndim(header)
        num = ndim - axis  # axis numbering is reversed relative to FITS
ax = self._header.get('CTYPE%i' % num)
if ax is not None:
if len(ax) == 8 or '-' in ax: # assume standard format
ax = ax[:5].split('-')[0].title()
else:
ax = ax.title()
translate = dict(
Glon='Galactic Longitude',
Glat='Galactic Latitude',
Ra='Right Ascension',
Dec='Declination',
Velo='Velocity',
Freq='Frequency'
)
return translate.get(ax, ax)
return super(WCSCoordinates, self).axis_label(axis)
def __gluestate__(self, context):
return dict(header=self._wcs.to_header_string())
@classmethod
def __setgluestate__(cls, rec, context):
from astropy.io import fits
return cls(fits.Header.fromstring(rec['header']))
def coordinates_from_header(header):
""" Convert a FITS header into a glue Coordinates object
:param header: Header to convert
:type header: :class:`astropy.io.fits.Header`
:rtype: :class:`~glue.core.coordinates.Coordinates`
"""
# We check whether the header contains at least CRVAL1 - if not, we would
# end up with a default WCS that isn't quite 1 to 1 (because of a 1-pixel
# offset) so better use Coordinates in that case.
from astropy.io.fits import Header
if isinstance(header, Header) and 'CRVAL1' in header:
try:
return WCSCoordinates(header)
except Exception as e:
            logging.getLogger(__name__).warning(
                "\n\n*******************************\n"
                "Encountered an error during WCS parsing. "
                "Discarding world coordinates! "
                "\n%s\n"
                "*******************************\n\n" % e)
return Coordinates()
def _get_ndim(header):
if 'NAXIS' in header:
return header['NAXIS']
if 'WCSAXES' in header:
return header['WCSAXES']
return None
def coordinates_from_wcs(wcs):
"""Convert a wcs object into a glue Coordinates object
:param wcs: The WCS object to use
:rtype: :class:`~glue.core.coordinates.Coordinates`
"""
from astropy.io import fits
hdr_str = wcs.wcs.to_header()
hdr = fits.Header.fromstring(hdr_str)
try:
return WCSCoordinates(hdr, wcs)
except (AttributeError, TypeError) as e:
print(e)
return Coordinates()
def header_from_string(string):
"""
Convert a string to a FITS header
"""
from astropy.io import fits
return fits.Header.fromstring(string, sep='\n')
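# A minimal sketch showing header_from_string() together with axis_label() and
# world_axis_unit(); the header text and the helper name _example_axis_labels
# are made up for illustration only.
def _example_axis_labels():
    hdr_text = "\n".join([
        "NAXIS   =                    2",
        "CTYPE1  = 'RA---TAN'",
        "CTYPE2  = 'DEC--TAN'",
        "CUNIT1  = 'deg'",
        "CUNIT2  = 'deg'",
    ])
    coords = WCSCoordinates(header_from_string(hdr_text))
    # Numpy axis order is the reverse of the FITS axis order, so Numpy axis 0
    # corresponds to CTYPE2 ('Declination') and axis 1 to CTYPE1.
    return ([coords.axis_label(i) for i in range(2)],
            coords.world_axis_unit(0))  # (['Declination', 'Right Ascension'], 'deg')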
| {
"repo_name": "saimn/glue",
"path": "glue/core/coordinates.py",
"copies": "1",
"size": "9098",
"license": "bsd-3-clause",
"hash": -8822891247049233000,
"line_mean": 29.7364864865,
"line_max": 86,
"alpha_frac": 0.5597933612,
"autogenerated": false,
"ratio": 4.0274457724656925,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5087239133665693,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import logging
import numpy as np
try:
from plotly import plotly
except ImportError:
plotly = None
from glue.core.layout import Rectangle, snap_to_grid
SYM = {'o': 'circle', 's': 'square', '+': 'cross', '^': 'triangle-up',
'*': 'cross'}
def _data(layer, component):
"""
Extract the data associated with a Component
For categorical components, extracts the categories and not
the remapped integers
"""
result = layer[component]
comp = layer.data.get_component(component)
if comp.categorical:
        result = comp.categories[result.astype(int)]
return result
def _sanitize(*arrs):
    mask = np.ones(arrs[0].shape, dtype=bool)
for a in arrs:
try:
mask &= (~np.isnan(a))
except TypeError: # non-numeric dtype
pass
return tuple(a[mask].ravel() for a in arrs)
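# A small hedged illustration of _sanitize: rows where any numeric input is NaN
# are dropped from every array, keeping them aligned. The values are made up.
def _example_sanitize():
    x = np.array([1.0, np.nan, 3.0])
    y = np.array([10.0, 20.0, 30.0])
    return _sanitize(x, y)  # -> (array([1., 3.]), array([10., 30.]))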
def _position_plots(viewers, layout):
rs = [Rectangle(v.position[0], v.position[1],
v.viewer_size[0], v.viewer_size[1])
for v in viewers]
right = max(r.x + r.w for r in rs)
top = max(r.y + r.h for r in rs)
for r in rs:
r.x = 1. * r.x / right
r.y = 1. - 1. * (r.y + r.h) / top
r.w = 1. * r.w / right
r.h = 1. * r.h / top
grid = snap_to_grid(rs, padding=0.05)
grid = dict((v, grid[r]) for v, r in zip(viewers, rs))
for i, plot in enumerate(viewers, 1):
g = grid[plot]
xdomain = [g.x, g.x + g.w]
ydomain = [g.y, g.y + g.h]
suffix = '' if i == 1 else str(i)
xax, yax = 'xaxis' + suffix, 'yaxis' + suffix
layout[xax].update(domain=xdomain, anchor=yax.replace('axis', ''))
layout[yax].update(domain=ydomain, anchor=xax.replace('axis', ''))
def _stack_horizontal(layout):
layout['xaxis']['domain'] = [0, 0.45]
layout['xaxis2']['domain'] = [0.55, 1]
layout['yaxis2']['anchor'] = 'x2'
def _grid_2x23(layout):
opts = {
'xaxis': {'domain': [0, 0.45]},
'yaxis': {'domain': [0, 0.45]},
'xaxis2': {"domain": [0.55, 1]},
'yaxis2': {"domain": [0, 0.45],
"anchor": "x2"
},
'xaxis3': {
"domain": [0, 0.45],
"anchor": "y3"
},
'yaxis3': {
"domain": [0.55, 1],
},
'xaxis4': {
"domain": [0.55, 1],
"anchor": "y4",
},
'yaxis4': {
"domain": [0.55, 1],
"anchor": "x4"
}
}
for k, v in opts.items():
if k not in layout:
continue
layout[k].update(**v)
def _axis(log=False, lo=0, hi=1, title='', categorical=False):
if log:
if lo < 0:
lo = 1e-3
if hi < 0:
hi = 1e-3
lo = np.log10(lo)
hi = np.log10(hi)
result = dict(type='log' if log else 'linear',
rangemode='normal',
range=[lo, hi], title=title)
if categorical:
result.pop('type')
# about 10 categorical ticks per graph
result['autotick'] = False
result['dtick'] = max(int(hi - lo) / 10, 1)
return result
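# A hedged illustration of _axis: with log=True the limits are converted to
# their base-10 logarithms so plotly receives the range in log space.
def _example_axis():
    return _axis(log=True, lo=1.0, hi=100.0, title='flux')
    # -> {'type': 'log', 'rangemode': 'normal', 'range': [0.0, 2.0], 'title': 'flux'}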
def _fix_legend_duplicates(traces, layout):
"""Prevent repeat entries in the legend"""
seen = set()
for t in traces:
key = (t.get('name'), t.get('marker', {}).get('color'))
if key in seen:
t['showlegend'] = False
else:
seen.add(key)
def _color(style):
r, g, b, a = style.rgba
r = int(r * 255)
g = int(g * 255)
b = int(b * 255)
return 'rgba(%i, %i, %i, %0.1f)' % (r, g, b, a)
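# A hedged illustration of _color: a glue style's 0-1 RGBA tuple becomes the
# 'rgba(...)' string plotly expects. _FakeStyle is a stand-in made up here.
def _example_color():
    class _FakeStyle(object):
        rgba = (1.0, 0.5, 0.0, 0.8)
    return _color(_FakeStyle())  # -> 'rgba(255, 127, 0, 0.8)'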
def export_scatter(viewer):
"""Export a scatter viewer to a list of
plotly-formatted data dictionaries"""
traces = []
xatt, yatt = viewer.xatt, viewer.yatt
xcat = ycat = False
for layer in viewer.layers:
if not layer.visible:
continue
l = layer.layer
xcat |= l.data.get_component(xatt).categorical
ycat |= l.data.get_component(yatt).categorical
marker = dict(symbol=SYM.get(l.style.marker, 'circle'),
color=_color(l.style),
size=l.style.markersize)
x, y = _sanitize(_data(l, xatt), _data(l, yatt))
trace = dict(x=x, y=y,
type='scatter',
mode='markers',
marker=marker,
name=l.label)
traces.append(trace)
xaxis = _axis(log=viewer.xlog, lo=viewer.xmin, hi=viewer.xmax,
title=viewer.xatt.label, categorical=xcat)
yaxis = _axis(log=viewer.ylog, lo=viewer.ymin, hi=viewer.ymax,
title=viewer.yatt.label, categorical=ycat)
return traces, xaxis, yaxis
def export_histogram(viewer):
traces = []
att = viewer.component
ymax = 1e-3
for artist in viewer.layers:
if not artist.visible:
continue
layer = artist.layer
x, y = _sanitize(artist.x[:-1], artist.y)
trace = dict(
name=layer.label,
type='bar',
marker=dict(color=_color(layer.style)),
x=x,
y=y)
traces.append(trace)
ymax = max(ymax, artist.y.max())
xlabel = att.label
xmin, xmax = viewer.xmin, viewer.xmax
if viewer.xlog:
xlabel = 'Log ' + xlabel
xmin = np.log10(xmin)
xmax = np.log10(xmax)
xaxis = _axis(lo=xmin, hi=xmax, title=xlabel)
yaxis = _axis(log=viewer.ylog, lo=0 if not viewer.ylog else 1e-3,
hi=ymax * 1.05)
return traces, xaxis, yaxis
def build_plotly_call(app):
args = []
layout = {'showlegend': True, 'barmode': 'overlay',
'title': 'Autogenerated by Glue'}
ct = 1
for tab in app.viewers:
for viewer in tab:
if hasattr(viewer, '__plotly__'):
p, xaxis, yaxis = viewer.__plotly__()
else:
assert type(viewer) in DISPATCH
p, xaxis, yaxis = DISPATCH[type(viewer)](viewer)
xaxis['zeroline'] = False
yaxis['zeroline'] = False
suffix = '' if ct == 1 else '%i' % ct
layout['xaxis' + suffix] = xaxis
layout['yaxis' + suffix] = yaxis
if ct > 1:
yaxis['anchor'] = 'x' + suffix
for item in p:
item['xaxis'] = 'x' + suffix
item['yaxis'] = 'y' + suffix
ct += 1
args.extend(p)
_position_plots([v for tab in app.viewers for v in tab], layout)
_fix_legend_duplicates(args, layout)
return [dict(data=args, layout=layout)], {}
def can_save_plotly(application):
"""
Check whether an application can be exported to plotly
Raises an exception if not
"""
if not plotly:
raise ValueError("Plotly Export requires the plotly python library. "
"Please install first")
for tab in application.viewers:
for viewer in tab:
if hasattr(viewer, '__plotly__'):
continue
if not isinstance(viewer, (ScatterWidget, HistogramWidget)):
raise ValueError("Plotly Export cannot handle viewer: %s"
% type(viewer))
if len(application.viewers) != 1:
raise ValueError("Plotly Export only supports a single tab. "
"Please close other tabs to export")
nplot = sum(len(t) for t in application.viewers)
if nplot == 0:
raise ValueError("Plotly Export requires at least one plot")
if nplot > 4:
raise ValueError("Plotly Export supports at most 4 plots")
def save_plotly(application):
"""
Save a Glue session to a plotly plot
This is currently restricted to 1-4 scatterplots or histograms
Parameters
----------
application : `~glue.core.application_base.Application`
Glue application to save
"""
args, kwargs = build_plotly_call(application)
    logging.getLogger(__name__).debug('plotly call: args=%r, kwargs=%r', args, kwargs)
# TODO: check what current GUI framework is
from glue.plugins.exporters.plotly.qt import QtPlotlyExporter
exporter = QtPlotlyExporter(plotly_args=args, plotly_kwargs=kwargs)
exporter.exec_()
DISPATCH = {}
try:
from glue.viewers.scatter.qt import ScatterWidget
from glue.viewers.histogram.qt import HistogramWidget
except ImportError:
pass
else:
DISPATCH[ScatterWidget] = export_scatter
DISPATCH[HistogramWidget] = export_histogram
| {
"repo_name": "saimn/glue",
"path": "glue/plugins/exporters/plotly/export_plotly.py",
"copies": "2",
"size": "8679",
"license": "bsd-3-clause",
"hash": 6288467433573633000,
"line_mean": 26.8173076923,
"line_max": 77,
"alpha_frac": 0.5391174098,
"autogenerated": false,
"ratio": 3.5584255842558425,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5097542994055843,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import logging
import pytest
logger = logging.getLogger('pytest_catchlog.test.perf')
@pytest.fixture(autouse=True)
def bench_runtest(request, benchmark):
# Using benchmark.weave to patch a runtest hook doesn't seem to work with
    # pytest 2.8.3; for some reason the hook gets called more than once, before
    # the benchmark cleanup finalizer runs, resulting in the
# "FixtureAlreadyUsed: Fixture can only be used once" error.
#
# Use plain old monkey patching instead.
ihook = request.node.ihook
saved_hook = ihook.pytest_runtest_call
def patched_hook(*args, **kwargs):
ihook.pytest_runtest_call = saved_hook # restore early
return benchmark(saved_hook, *args, **kwargs)
ihook.pytest_runtest_call = patched_hook
benchmark.group = 'runtest'
@pytest.yield_fixture # because 'caplog' is also a yield_fixture
def stub():
"""No-op stub used in place of 'caplog'.
Helps to measure the inevitable overhead of the pytest fixture injector to
let us exclude it later on.
"""
yield
def test_fixture_stub(stub):
logger.info('Testing %r hook performance: %s',
'catchlog', 'pure runtest hookwrapper overhead')
def test_caplog_fixture(caplog):
logger.info('Testing %r hook performance: %s',
'catchlog', 'hookwrapper + caplog fixture overhead')
| {
"repo_name": "eisensheng/pytest-catchlog",
"path": "tests/perf/bench/test_runtest_hook.py",
"copies": "1",
"size": "1422",
"license": "mit",
"hash": 701115982600465700,
"line_mean": 29.2553191489,
"line_max": 78,
"alpha_frac": 0.6933895921,
"autogenerated": false,
"ratio": 3.7421052631578946,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4935494855257894,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import logging
import workflows
class CommonTransport(object):
'''A common transport class, containing e.g. the logic to manage
subscriptions and transactions.'''
__callback_interceptor = None
__subscriptions = {}
__subscription_id = 0
__transactions = set()
__transaction_id = 0
log = logging.getLogger('workflows.transport')
#
# -- High level communication calls ----------------------------------------
#
@staticmethod
def connect():
'''Connect the transport class. This function must be overridden.
:return: True-like value when connection successful,
False-like value otherwise.'''
return False
@staticmethod
def is_connected():
'''Returns the current connection status. This function must be overridden.
:return: True-like value when connection is available,
False-like value otherwise.'''
return False
@staticmethod
def disconnect():
'''Gracefully disconnect the transport class. This function should be
overridden.'''
def subscribe(self, channel, callback, **kwargs):
'''Listen to a queue, notify via callback function.
:param channel: Queue name to subscribe to
:param callback: Function to be called when messages are received.
The callback will pass two arguments, the header as a
dictionary structure, and the message.
:param **kwargs: Further parameters for the transport layer. For example
disable_mangling: Receive messages as unprocessed strings.
exclusive: Attempt to become exclusive subscriber to the queue.
acknowledgement: If true receipt of each message needs to be
acknowledged.
:return: A unique subscription ID
'''
self.__subscription_id += 1
def mangled_callback(header, message):
return callback(header, self._mangle_for_receiving(message))
if 'disable_mangling' in kwargs:
if kwargs['disable_mangling']:
mangled_callback = callback
del kwargs['disable_mangling']
self.__subscriptions[self.__subscription_id] = {
'channel': channel,
'callback': mangled_callback,
'ack': kwargs.get('acknowledgement'),
'unsubscribed': False,
}
self.log.debug('Subscribing to %s with ID %d',
channel, self.__subscription_id)
self._subscribe(self.__subscription_id, channel, mangled_callback, **kwargs)
return self.__subscription_id
def unsubscribe(self, subscription, drop_callback_reference=False, **kwargs):
'''Stop listening to a queue or a broadcast
:param subscription: Subscription ID to cancel
:param drop_callback_reference: Drop the reference to the registered
callback function immediately. This
means any buffered messages still in
flight will not arrive at the intended
destination and cause exceptions to be
raised instead.
:param **kwargs: Further parameters for the transport layer.
'''
if subscription not in self.__subscriptions:
raise workflows.Error \
("Attempting to unsubscribe unknown subscription")
if self.__subscriptions[subscription]['unsubscribed']:
raise workflows.Error \
("Attempting to unsubscribe already unsubscribed subscription")
self._unsubscribe(subscription, **kwargs)
self.__subscriptions[subscription]['unsubscribed'] = True
if drop_callback_reference:
self.drop_callback_reference(subscription)
def drop_callback_reference(self, subscription):
'''Drop reference to the callback function after unsubscribing.
Any future messages arriving for that subscription will result in
exceptions being raised.
:param subscription: Subscription ID to delete callback reference for.
'''
if subscription not in self.__subscriptions:
raise workflows.Error \
("Attempting to drop callback reference for unknown subscription")
if not self.__subscriptions[subscription]['unsubscribed']:
raise workflows.Error \
("Attempting to drop callback reference for live subscription")
del self.__subscriptions[subscription]
def subscribe_broadcast(self, channel, callback, **kwargs):
'''Listen to a broadcast topic, notify via callback function.
:param channel: Topic name to subscribe to
:param callback: Function to be called when messages are received.
The callback will pass two arguments, the header as a
dictionary structure, and the message.
:param **kwargs: Further parameters for the transport layer. For example
disable_mangling: Receive messages as unprocessed strings.
retroactive: Ask broker to send old messages if possible
:return: A unique subscription ID
'''
self.__subscription_id += 1
def mangled_callback(header, message):
return callback(header, self._mangle_for_receiving(message))
if 'disable_mangling' in kwargs:
if kwargs['disable_mangling']:
mangled_callback = callback
del kwargs['disable_mangling']
self.__subscriptions[self.__subscription_id] = {
'channel': channel,
'callback': mangled_callback,
'ack': False,
'unsubscribed': False,
}
self.log.debug('Subscribing to broadcasts on %s with ID %d',
channel, self.__subscription_id)
self._subscribe_broadcast(self.__subscription_id, channel,
mangled_callback, **kwargs)
return self.__subscription_id
def subscription_callback(self, subscription):
'''Retrieve the callback function for a subscription. Raise a
workflows.Error if the subscription does not exist.
All transport callbacks can be intercepted by setting an
interceptor function with subscription_callback_intercept().
:param subscription: Subscription ID to look up
:return: Callback function
'''
subscription_record = self.__subscriptions.get(subscription)
if not subscription_record:
raise workflows.Error \
("Attempting to callback on unknown subscription")
callback = subscription_record['callback']
if self.__callback_interceptor:
return self.__callback_interceptor(callback)
return callback
def subscription_callback_set_intercept(self, interceptor):
'''Set a function to intercept all callbacks. This is useful to, for
example, keep a thread barrier between the transport related functions
and processing functions.
:param interceptor: A function that takes the original callback function
and returns a modified callback function. Or None to
disable interception.
'''
self.__callback_interceptor = interceptor
def send(self, destination, message, **kwargs):
'''Send a message to a queue.
:param destination: Queue name to send to
:param message: Either a string or a serializable object to be sent
:param **kwargs: Further parameters for the transport layer. For example
delay: Delay transport of message by this many seconds
headers: Optional dictionary of header entries
expiration: Optional expiration time, relative to sending time
transaction: Transaction ID if message should be part of a
transaction
'''
message = self._mangle_for_sending(message)
self._send(destination, message, **kwargs)
def raw_send(self, destination, message, **kwargs):
'''Send a raw (unmangled) message to a queue.
This may cause errors if the receiver expects a mangled message.
:param destination: Queue name to send to
:param message: Either a string or a serializable object to be sent
:param **kwargs: Further parameters for the transport layer. For example
delay: Delay transport of message by this many seconds
headers: Optional dictionary of header entries
expiration: Optional expiration time, relative to sending time
transaction: Transaction ID if message should be part of a
transaction
'''
self._send(destination, message, **kwargs)
def broadcast(self, destination, message, **kwargs):
'''Broadcast a message.
:param destination: Topic name to send to
:param message: Either a string or a serializable object to be sent
:param **kwargs: Further parameters for the transport layer. For example
delay: Delay transport of message by this many seconds
headers: Optional dictionary of header entries
expiration: Optional expiration time, relative to sending time
transaction: Transaction ID if message should be part of a
transaction
'''
message = self._mangle_for_sending(message)
self._broadcast(destination, message, **kwargs)
def raw_broadcast(self, destination, message, **kwargs):
'''Broadcast a raw (unmangled) message.
This may cause errors if the receiver expects a mangled message.
:param destination: Topic name to send to
:param message: Either a string or a serializable object to be sent
:param **kwargs: Further parameters for the transport layer. For example
delay: Delay transport of message by this many seconds
headers: Optional dictionary of header entries
expiration: Optional expiration time, relative to sending time
transaction: Transaction ID if message should be part of a
transaction
'''
self._broadcast(destination, message, **kwargs)
def ack(self, message, subscription_id=None, **kwargs):
'''Acknowledge receipt of a message. This only makes sense when the
'acknowledgement' flag was set for the relevant subscription.
:param message: ID of the message to be acknowledged, OR a dictionary
containing a field 'message-id'.
:param subscription_id: ID of the associated subscription. Optional when
a dictionary is passed as first parameter and
that dictionary contains field 'subscription'.
:param **kwargs: Further parameters for the transport layer. For example
transaction: Transaction ID if acknowledgement should be part of
a transaction
'''
if isinstance(message, dict):
message_id = message.get('message-id')
if not subscription_id:
subscription_id = message.get('subscription')
else:
message_id = message
if not message_id:
raise workflows.Error('Cannot acknowledge message without ' + \
'message ID')
if not subscription_id:
raise workflows.Error('Cannot acknowledge message without ' + \
'subscription ID')
self.log.debug('Acknowledging message %s on subscription %s',
message_id, subscription_id)
self._ack(message_id, subscription_id, **kwargs)
def nack(self, message, subscription_id=None, **kwargs):
'''Reject receipt of a message. This only makes sense when the
'acknowledgement' flag was set for the relevant subscription.
:param message: ID of the message to be rejected, OR a dictionary
containing a field 'message-id'.
:param subscription_id: ID of the associated subscription. Optional when
a dictionary is passed as first parameter and
that dictionary contains field 'subscription'.
:param **kwargs: Further parameters for the transport layer. For example
transaction: Transaction ID if rejection should be part of a
transaction
'''
if isinstance(message, dict):
message_id = message.get('message-id')
if not subscription_id:
subscription_id = message.get('subscription')
else:
message_id = message
if not message_id:
raise workflows.Error('Cannot reject message without ' + \
'message ID')
if not subscription_id:
raise workflows.Error('Cannot reject message without ' + \
'subscription ID')
self.log.debug('Rejecting message %s on subscription %s',
message_id, subscription_id)
self._nack(message_id, subscription_id, **kwargs)
def transaction_begin(self, **kwargs):
'''Start a new transaction.
       :param **kwargs: Further parameters for the transport layer.
:return: A transaction ID that can be passed to other functions.
'''
self.__transaction_id += 1
self.__transactions.add(self.__transaction_id)
self.log.debug('Starting transaction with ID %d',
                   self.__transaction_id)
self._transaction_begin(self.__transaction_id, **kwargs)
return self.__transaction_id
def transaction_abort(self, transaction_id, **kwargs):
'''Abort a transaction and roll back all operations.
:param transaction_id: ID of transaction to be aborted.
:param **kwargs: Further parameters for the transport layer.
'''
if transaction_id not in self.__transactions:
raise workflows.Error("Attempting to abort unknown transaction")
self.log.debug('Aborting transaction %s', transaction_id)
self.__transactions.remove(transaction_id)
self._transaction_abort(transaction_id, **kwargs)
def transaction_commit(self, transaction_id, **kwargs):
'''Commit a transaction.
:param transaction_id: ID of transaction to be committed.
:param **kwargs: Further parameters for the transport layer.
'''
if transaction_id not in self.__transactions:
raise workflows.Error("Attempting to commit unknown transaction")
self.log.debug('Committing transaction %s', transaction_id)
self.__transactions.remove(transaction_id)
self._transaction_commit(transaction_id, **kwargs)
#
# -- Low level communication calls to be implemented by subclass -----------
#
@staticmethod
def _subscribe(sub_id, channel, callback, **kwargs):
'''Listen to a queue, notify via callback function.
:param sub_id: ID for this subscription in the transport layer
:param channel: Queue name to subscribe to
:param callback: Function to be called when messages are received
:param **kwargs: Further parameters for the transport layer. For example
exclusive: Attempt to become exclusive subscriber to the queue.
acknowledgement: If true receipt of each message needs to be
acknowledged.
'''
raise NotImplementedError("Transport interface not implemented")
@staticmethod
def _subscribe_broadcast(sub_id, channel, callback, **kwargs):
'''Listen to a broadcast topic, notify via callback function.
:param sub_id: ID for this subscription in the transport layer
:param channel: Topic name to subscribe to
:param callback: Function to be called when messages are received
:param **kwargs: Further parameters for the transport layer. For example
retroactive: Ask broker to send old messages if possible
'''
raise NotImplementedError("Transport interface not implemented")
@staticmethod
  def _unsubscribe(sub_id, **kwargs):
    '''Stop listening to a queue or a broadcast.
       :param sub_id: ID for this subscription in the transport layer
       :param **kwargs: Further parameters for the transport layer.
    '''
raise NotImplementedError("Transport interface not implemented")
@staticmethod
def _send(destination, message, **kwargs):
'''Send a message to a queue.
:param destination: Queue name to send to
:param message: A string to be sent
:param **kwargs: Further parameters for the transport layer. For example
headers: Optional dictionary of header entries
expiration: Optional expiration time, relative to sending time
transaction: Transaction ID if message should be part of a
transaction
'''
raise NotImplementedError("Transport interface not implemented")
@staticmethod
def _broadcast(destination, message, **kwargs):
'''Broadcast a message.
:param destination: Topic name to send to
:param message: A string to be broadcast
:param **kwargs: Further parameters for the transport layer. For example
headers: Optional dictionary of header entries
expiration: Optional expiration time, relative to sending time
transaction: Transaction ID if message should be part of a
transaction
'''
raise NotImplementedError("Transport interface not implemented")
@staticmethod
def _ack(message_id, subscription_id, **kwargs):
'''Acknowledge receipt of a message. This only makes sense when the
'acknowledgement' flag was set for the relevant subscription.
:param message_id: ID of the message to be acknowledged.
:param subscription_id: ID of the associated subscription.
:param **kwargs: Further parameters for the transport layer. For example
transaction: Transaction ID if acknowledgement should be part of
a transaction
'''
raise NotImplementedError("Transport interface not implemented")
@staticmethod
def _nack(message_id, subscription_id, **kwargs):
'''Reject receipt of a message. This only makes sense when the
'acknowledgement' flag was set for the relevant subscription.
:param message_id: ID of the message to be rejected.
:param subscription_id: ID of the associated subscription.
:param **kwargs: Further parameters for the transport layer. For example
transaction: Transaction ID if rejection should be part of a
transaction
'''
raise NotImplementedError("Transport interface not implemented")
@staticmethod
def _transaction_begin(transaction_id, **kwargs):
'''Start a new transaction.
:param transaction_id: ID for this transaction in the transport layer.
:param **kwargs: Further parameters for the transport layer.
'''
raise NotImplementedError("Transport interface not implemented")
@staticmethod
def _transaction_abort(transaction_id, **kwargs):
'''Abort a transaction and roll back all operations.
:param transaction_id: ID of transaction to be aborted.
:param **kwargs: Further parameters for the transport layer.
'''
raise NotImplementedError("Transport interface not implemented")
@staticmethod
def _transaction_commit(transaction_id, **kwargs):
'''Commit a transaction.
:param transaction_id: ID of transaction to be committed.
:param **kwargs: Further parameters for the transport layer.
'''
raise NotImplementedError("Transport interface not implemented")
#
# -- Internal message mangling functions -----------------------------------
#
# Some transport mechanisms will not be able to work with arbitrary objects,
# so these functions are used to prepare a message for sending/receiving.
# The canonical example is serialization/deserialization, see stomp_transport
@staticmethod
def _mangle_for_sending(message):
'''Function that any message will pass through before it being forwarded to
the actual _send* functions.'''
return message
@staticmethod
def _mangle_for_receiving(message):
'''Function that any message will pass through before it being forwarded to
the receiving subscribed callback functions.'''
return message
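# A minimal, illustrative loopback transport: it is not part of this package,
# but sketches how a concrete transport only needs to fill in the low-level
# _subscribe/_send hooks for the high-level subscribe()/send() logic above to
# work. A real implementation would talk to a message broker instead.
class _LoopbackTransport(CommonTransport):
    '''Toy transport that delivers sent messages to local subscribers.'''
    def __init__(self):
        self._channels = {}
    def _subscribe(self, sub_id, channel, callback, **kwargs):
        self._channels.setdefault(channel, []).append(callback)
    def _send(self, destination, message, **kwargs):
        for callback in self._channels.get(destination, []):
            callback({'destination': destination}, message)
def _example_loopback_usage():
    transport = _LoopbackTransport()
    received = []
    transport.subscribe('greetings', lambda header, message: received.append(message))
    transport.send('greetings', 'hello')
    return received  # -> ['hello']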
| {
"repo_name": "xia2/workflows",
"path": "workflows/transport/common_transport.py",
"copies": "1",
"size": "20093",
"license": "bsd-3-clause",
"hash": 5544261933753901000,
"line_mean": 44.7699316629,
"line_max": 80,
"alpha_frac": 0.661971831,
"autogenerated": false,
"ratio": 5.102336211274759,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.010371440175255714,
"num_lines": 439
} |
from __future__ import absolute_import, division, print_function
import logging
logger = logging.getLogger(__name__)
def _get_package(package, version_query, media_type, package_class):
"""
Fetch the package data from the datastore
and instantiate a :obj:`appr.models.package_base.PackageModelBase`
Args:
package (:obj:`str`): package name in the format "namespace/name" or "domain.com/name"
version_query (:obj:`str`): a version query, eg: ">=1.5,<2.0"
package_class (:obj:`appr.models.package_base:PackageBase`): the implemented Package class to use
Returns:
      :obj:`appr.models.package_base.PackageModelBase`: the package release matching the query
See Also:
* :obj:`appr.api.models.package_base.PackageModelBase`
* :obj:`appr.api.models.etcd.package.Package`
Raises:
:obj:`appr.exception.PackageNotFound`: package not found
:obj:`appr.exception.InvalidVersion`: version-query malformated
:obj:`appr.exception.PackageVersionNotFound`: version-query didn't match any release
"""
if version_query is None:
version_query = 'default'
p = package_class.get(package, version_query, media_type)
return p
def pull_blob(package, digest, blob_class):
blob = blob_class.get(package, digest)
resp = {
"package": package,
"blob": blob.b64blob,
"release": digest,
"filename": "%s_%s.tar.gz" % (package.replace("/", "_"), digest[0:8])}
return resp
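# Illustrative only: how pull_blob builds the suggested filename from the
# package name and the first eight digest characters (values here are made up).
def _example_blob_filename():
    package = "coreos/etcd"
    digest = "93de60f59238f9aebce5b9f8bc708e02a0d740364fcd4b185c6da7fc1cdfe1ba"
    return "%s_%s.tar.gz" % (package.replace("/", "_"), digest[0:8])
    # -> 'coreos_etcd_93de60f5.tar.gz'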
def pull(package, version_query, media_type, package_class, blob_class):
"""
    Retrieves the package blob from the datastore
Args:
package (:obj:`str`): package name in the format "namespace/name" or "domain.com/name"
version_query (:obj:`str`): a version query, eg: ">=1.5,<2.0"
package_class (:obj:`appr.models.package_base:PackageBase`): the implemented Package class to use
Returns:
:obj:`dict`: package data
* package: package name
* version: version that matched the version query
* filename: suggested filename to create the tarball
* blob: a `tar.gz` encoded in base64.
Example:
>>> appr.api.impl.registry.pull("coreos/etcd", version=">=3")
{
'blob': 'H4sICHDFvlcC/3RpdF9yb2NrZXRjaGF0XzEuMTAuMGt1Yi50YXIA7ZdRb5swEM',
'filename': u'coreos_etcd_3.0.1.tar.gz',
'package': 'coreos_etcd',
'version': u'3.0.1'
}
Raises:
:obj:`appr.exception.PackageNotFound`: package not found
:obj:`appr.exception.InvalidVersion`: version-query malformated
:obj:`appr.exception.PackageVersionNotFound`: version-query didn't match any release
See Also:
* :obj:`appr.api.registry.pull`
"""
packagemodel = _get_package(package, version_query, media_type, package_class=package_class)
blob = blob_class.get(package, packagemodel.digest)
resp = {
"package": package,
"blob": blob.b64blob,
"release": packagemodel.release,
"media_type": packagemodel.media_type,
"filename": "%s_%s.tar.gz" % (packagemodel.package.replace("/", "_"),
packagemodel.release)}
return resp
def push(package, release, media_type, blob, force, package_class, **kwargs):
"""
Push a new package release in the the datastore
Args:
package (:obj:`str`): package name in the format "namespace/name" or "domain.com/name"
release (:obj:`str`): the 'exact' package release (this is not a release_query)
blob (:obj:`str`): the package directory in `tar.gz` and encoded in base64
force (:obj:`boolean`): if the package exists already, overwrite it
package_class (:obj:`appr.models.package_base:PackageBase`): the implemented Package class to use
Returns:
:obj:`dict`: push status
Example:
>>> appr.api.impl.registry.push("coreos/etcd", "3.0.1",
"H4sICHDFvlcC/3RpdF9yb2NrZXRjaGF0XzEuMTAuMGt1Yi50YXIA7ZdRb5swEM")
{
'status': u'ok'
}
Raises:
PackageAlreadyExists: if package already exists and `force` is False
See Also:
* :obj:`appr.api.registry.push`
"""
p = package_class(package, release, media_type, blob, kwargs.get('metadata', None))
p.save(force=force, **kwargs)
return {"status": "ok"}
def search(query, package_class, **kwargs):
"""
Search packages
Args:
package (:obj:`str`): package name in the format "namespace/name" or "domain.com/name"
package_class (:obj:`appr.models.package_base:PackageBase`): the implemented Package class to use
Returns:
:obj:`list`: list of package names
Example:
>>> appr.api.impl.registry.search("etcd")
[
'coreos/etcd',
'ant31/etcd',
'my-etcd/stable'
]
See Also:
* :obj:`appr.api.registry.search`
"""
return package_class.search(query, **kwargs)
def list_packages(namespace, package_class, **kwargs):
"""
List all packages, filters can be applied
Must have at least a release to be visible
Todos:
- sort_by: name, created_at, downloads, number of stars
- filter_by: users
Args:
namespace (:obj:`str`): returns packages from the `namespace` only
package_class (:obj:`appr.models.package_base:PackageBase`): the implemented Package class to use
Returns:
:obj:`list of dict`: list packages
* name: package name
* available_releases (list of str): All releases
* created_at (datetime, optional): package creation date
* downloads (int, optional): number of downloads
* release: release name
Example:
>>> appr.api.impl.registry.list_packages()
[
{
'available_releases': ['0.1.0'],
'name': u'quentinm/rados-gateway',
'release': '0.1.0',
        'created_at': '2016-04-22T11:58:34.103Z',
'downloads': 41
},
{
'available_releases': ['0.1.0'],
'name': u'quentinm/nova',
'release': '0.1.0'
},
]
See Also:
* :obj:`appr.api.registry.list_packages`
"""
resp = package_class.all(namespace, **kwargs)
return resp
def show_package(package, release, media_type, channel_class, package_class):
"""
Returns package details
Args:
package (:obj:`str`): package name in the format "namespace/name" or "domain.com/name"
release (:obj:`str`): the 'exact' package release (this is not a release_query)
channel_class (:obj:`appr.models.channel_base:ChannelBase`): the implemented Channel class to use
package_class (:obj:`appr.models.package_base:PackageBase`): the implemented Package class to use
Returns:
:obj:`dict`: package data
* release (str)
* name (str)
* created_at (str)
* digest (str)
* channels (list)
* available_releases (list)
* dependencies (list)
* variables (dict)
* manifest (str)
Example:
>>> appr.api.impl.registry.show_package("ns/mypackage")
{
"release": "3.2.0-rc",
"name": "ns/mypackage",
"created_at": "2016-08-25T10:16:16.366758",
"digest": "93de60f59238f9aebce5b9f8bc708e02a0d740364fcd4b185c6da7fc1cdfe1ba",
"channels": ['stable', 'beta'],
"available_releases": [
"3.2.0-rc"
"3.1.0",
"3.0.1"
],
"dependencies": [
"ns/dep1",
"ns/dep2",
"ns/dep3"
],
"variables": {
"replicas": 1,
"image": "ns/mypackage:latest",
"namespace": "default",
"cluster": "cluster.local",
"mail_url": "smtp://"
},
"manifest": "---...."
}
Raises:
:obj:`appr.exception.PackageNotFound`: package not found
:obj:`appr.exception.InvalidRelease`: release-query malformated
:obj:`appr.exception.PackageReleaseNotFound`: release-query didn't match any release
See Also:
* :obj:`appr.api.registry.show_package`
"""
packagemodel = _get_package(package, release, media_type, package_class)
# manifest = packagemodel.manifest()
# optional = {"manifest": packagemodel.packager.manifest,
# "variables": manifest.variables,
# "dependencies": manifest.dependencies}
response = {"channels": packagemodel.channels(channel_class)}
response.update(packagemodel.data)
return response
def show_package_releases(package, media_type, package_class):
return package_class.view_releases(package, media_type)
def show_package_manifests(package, release, package_class):
return package_class.view_manifests(package, release)
# CHANNELS
def list_channels(package, channel_class):
"""
List all channels for a given package
Args:
package (:obj:`str`): package name in the format "namespace/name" or "domain.com/name"
channel_class (:obj:`appr.models.channel_base:ChannelBase`): the implemented Channel class to use
Returns:
:obj:`list of dict`: list channels:
* channel (str): channel name
* current (str): latest/default release associated to the channel
* releases (list): list channel's releases
Example:
>>> appr.api.impl.registry.list_channels("myns/package")
[{'channel': u'stable', 'current': '1.10.2', 'releases': [u'1.10.2']},
       {'channel': u'dev', 'current': '2.0.0-beta', 'releases': ['1.10.2', '2.0.0-beta']}]
See Also:
* :obj:`appr.api.registry.list_channels`
"""
channels = [c.to_dict() for c in channel_class.all(package)]
return channels
def show_channel(package, name, channel_class):
"""
Show channel info
Args:
package (:obj:`str`): package name in the format "namespace/name" or "domain.com/name"
name (:obj:`str`): channel name to inspect
channel_class (:obj:`appr.models.channel_base:ChannelBase`): the implemented Channel class to use
Returns:
:obj:`dict`: channel info
* channel (str): channel name
* current (str): latest/default release associated to the channel
* releases (list): list channel's releases
Example:
>>> appr.api.impl.registry.list_channels("tit/rocketchat", 'dev')
{'channel': u'dev', 'current': '2.0.0-beta', 'releases': [u'1.10.2']}
Raises:
:obj:`appr.api.exception.ChannelNotFound`: channel not found
See Also:
* :obj:`appr.api.registry.show_channel`
"""
c = channel_class.get(name, package)
return c.to_dict()
def add_channel_release(package, name, release, channel_class, package_class):
"""
Add a package-release to a channel
Args:
package (:obj:`str`): package name in the format "namespace/name" or "domain.com/name"
name (:obj:`str`): channel name to inspect
release (:obj:`str`): package release to add
channel_class (:obj:`appr.models.channel_base:ChannelBase`): the implemented Channel class to use
package_class (:obj:`appr.models.package_base:PackageBase`): the implemented Package class to use
Returns:
:obj:`dict`: channel info
* channel (str): channel name
* current (str): latest/default release associated to the channel
* releases (list): list channel's releases
Example:
>>> appr.api.impl.registry.list_channels("tit/rocketchat", 'dev')
{'channel': u'dev', 'current': '2.0.0-beta', 'releases': [u'1.10.2']}
Raises:
:obj:`appr.api.exception.ChannelNotFound`: channel not found
See Also:
* :obj:`appr.api.registry.add_channel_release`
"""
channel = channel_class(name, package)
channel.add_release(release, package_class)
return channel.to_dict()
def delete_channel_release(package, name, release, channel_class, package_class):
"""
Remove a release from a channel
Args:
package (:obj:`str`): package name in the format "namespace/name" or "domain.com/name"
name (:obj:`str`): channel name to inspect
release (:obj:`str`): package release to add
channel_class (:obj:`appr.models.channel_base:ChannelBase`): the implemented Channel class to use
package_class (:obj:`appr.models.package_base:PackageBase`): the implemented Package class to use
    Returns:
      :obj:`dict`: deletion status with the package, channel name and removed release
    Example:
      >>> appr.api.impl.registry.delete_channel_release("tit/rocketchat", 'dev', '1.10.2')
      {'status': 'deleted', 'package': 'tit/rocketchat', 'name': 'dev', 'release': '1.10.2'}
Raises:
:obj:`appr.api.exception.ChannelNotFound`: channel not found
See Also:
* :obj:`appr.api.registry.delete_channel_release`
"""
channel = channel_class.get(name, package)
channel.remove_release(release)
return {"status": "deleted", "package": package, "name": name, "release": release}
def delete_channel(package, name, channel_class):
channel = channel_class.get(name, package)
channel.delete()
return {"channel": channel.name, "package": package, "action": 'delete'}
def delete_package(package, release, media_type, package_class):
package_class.delete(package, release, media_type)
return {"status": "deleted", "package": package, "release": release}
| {
"repo_name": "app-registry/appr",
"path": "appr/api/impl/registry.py",
"copies": "2",
"size": "13391",
"license": "apache-2.0",
"hash": -4718026847860198000,
"line_mean": 32.3940149626,
"line_max": 103,
"alpha_frac": 0.6310955119,
"autogenerated": false,
"ratio": 3.644801306477953,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0009844323181539764,
"num_lines": 401
} |
from __future__ import absolute_import, division, print_function
import logging
logging.basicConfig()
logger = logging.getLogger(__name__)
logger.setLevel(logging.WARNING)
# build the blaze namespace with selected functions
from . import catalog
from . import compute, io
from .objects.array import Array
from .objects.constructors import (
array, empty, ones, zeros)
from .compute.function import BlazeFunc
from .compute.eval import (
eval, append)
from .compute.elwise_eval import _elwise_eval
from .compute.ops.ufuncs import *
from .datadescriptor import (
DyND_DDesc, BLZ_DDesc, HDF5_DDesc, CSV_DDesc, JSON_DDesc, Stream_DDesc)
inf = float('inf')
nan = float('nan')
__version__ = '0.4.2-dev'
# If IPython is already loaded, register the Blaze catalog magic
import sys
if 'IPython' in sys.modules:
catalog.register_ipy_magic()
del sys
def print_versions():
"""Print all the versions of software that Blaze relies on."""
import sys, platform
import numpy as np
import dynd
import datashape
import blz
print("-=" * 38)
print("Blaze version: %s" % __version__)
print("Datashape version: %s" % datashape.__version__)
print("NumPy version: %s" % np.__version__)
print("DyND version: %s / LibDyND %s" %
(dynd.__version__, dynd.__libdynd_version__))
print("BLZ version: %s" % blz.__version__)
print("Blosc version: %s (%s)" % blz.blosc_version())
print("Python version: %s" % sys.version)
(sysname, nodename, release, version, machine, processor) = \
platform.uname()
print("Platform: %s-%s-%s (%s)" % (sysname, release, machine, version))
if sysname == "Linux":
print("Linux dist: %s" % " ".join(platform.linux_distribution()[:-1]))
if not processor:
processor = "not recognized"
print("Processor: %s" % processor)
print("Byte-ordering: %s" % sys.byteorder)
print("Detected cores: %s" % blz.detect_number_of_cores())
print("-=" * 38)
def test(verbosity=1, xunitfile=None, exit=False):
"""
Runs the full Blaze test suite, outputting
the results of the tests to sys.stdout.
This uses nose tests to discover which tests to
run, and runs tests in any 'tests' subdirectory
within the Blaze module.
Parameters
----------
verbosity : int, optional
Value 0 prints very little, 1 prints a little bit,
and 2 prints the test names while testing.
xunitfile : string, optional
If provided, writes the test results to an xunit
style xml file. This is useful for running the tests
in a CI server such as Jenkins.
exit : bool, optional
If True, the function will call sys.exit with an
error code after the tests are finished.
"""
import nose
import os
import sys
argv = ['nosetests', '--verbosity=%d' % verbosity]
# Output an xunit file if requested
if xunitfile:
argv.extend(['--with-xunit', '--xunit-file=%s' % xunitfile])
# Set the logging level to warn
argv.extend(['--logging-level=WARN'])
# Add all 'tests' subdirectories to the options
rootdir = os.path.dirname(__file__)
for root, dirs, files in os.walk(rootdir):
if 'tests' in dirs:
testsdir = os.path.join(root, 'tests')
argv.append(testsdir)
print('Test dir: %s' % testsdir[len(rootdir)+1:])
# print versions (handy when reporting problems)
print_versions()
sys.stdout.flush()
# Ask nose to do its thing
return nose.main(argv=argv, exit=exit)
| {
"repo_name": "FrancescAlted/blaze",
"path": "blaze/__init__.py",
"copies": "1",
"size": "3565",
"license": "bsd-3-clause",
"hash": 7522279683670109000,
"line_mean": 32.9523809524,
"line_max": 78,
"alpha_frac": 0.6485273492,
"autogenerated": false,
"ratio": 3.6156186612576064,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4764146010457606,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import logging
logging.basicConfig()
logger = logging.getLogger(__name__)
logger.setLevel(logging.WARNING)
inf = float('inf')
nan = float('nan')
__version__ = '0.6.0-dev'
# If IPython is already loaded, register the Blaze catalog magic
# from . import catalog
# import sys
# if 'IPython' in sys.modules:
# catalog.register_ipy_magic()
# del sys
def print_versions():
"""Print all the versions of software that Blaze relies on."""
import sys, platform
import numpy as np
import dynd
import datashape
import blz
print("-=" * 38)
print("Blaze version: %s" % __version__)
print("Datashape version: %s" % datashape.__version__)
print("NumPy version: %s" % np.__version__)
print("DyND version: %s / LibDyND %s" %
(dynd.__version__, dynd.__libdynd_version__))
print("BLZ version: %s" % blz.__version__)
print("Blosc version: %s (%s)" % blz.blosc_version())
print("Python version: %s" % sys.version)
(sysname, nodename, release, version, machine, processor) = \
platform.uname()
print("Platform: %s-%s-%s (%s)" % (sysname, release, machine, version))
if sysname == "Linux":
print("Linux dist: %s" % " ".join(platform.linux_distribution()[:-1]))
if not processor:
processor = "not recognized"
print("Processor: %s" % processor)
print("Byte-ordering: %s" % sys.byteorder)
print("Detected cores: %s" % blz.detect_number_of_cores())
print("-=" * 38)
def test(verbosity=1, xunitfile=None, exit=False):
"""
Runs the full Blaze test suite, outputting
the results of the tests to sys.stdout.
This uses nose tests to discover which tests to
run, and runs tests in any 'tests' subdirectory
within the Blaze module.
Parameters
----------
verbosity : int, optional
Value 0 prints very little, 1 prints a little bit,
and 2 prints the test names while testing.
xunitfile : string, optional
If provided, writes the test results to an xunit
style xml file. This is useful for running the tests
in a CI server such as Jenkins.
exit : bool, optional
If True, the function will call sys.exit with an
error code after the tests are finished.
"""
import nose
import os
import sys
argv = ['nosetests', '--verbosity=%d' % verbosity]
# Output an xunit file if requested
if xunitfile:
argv.extend(['--with-xunit', '--xunit-file=%s' % xunitfile])
# Set the logging level to warn
argv.extend(['--logging-level=WARN'])
# Add all 'tests' subdirectories to the options
rootdir = os.path.dirname(__file__)
for root, dirs, files in os.walk(rootdir):
if 'tests' in dirs:
testsdir = os.path.join(root, 'tests')
argv.append(testsdir)
print('Test dir: %s' % testsdir[len(rootdir)+1:])
# print versions (handy when reporting problems)
print_versions()
sys.stdout.flush()
# Ask nose to do its thing
return nose.main(argv=argv, exit=exit)
| {
"repo_name": "aterrel/blaze",
"path": "blaze/__init__.py",
"copies": "1",
"size": "3124",
"license": "bsd-3-clause",
"hash": 1866299684361960000,
"line_mean": 32.5913978495,
"line_max": 78,
"alpha_frac": 0.6302816901,
"autogenerated": false,
"ratio": 3.65807962529274,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.478836131539274,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import math
from operator import getitem
import uuid
import numpy as np
import pandas as pd
from pandas.core.categorical import is_categorical_dtype
from toolz import merge
from .core import DataFrame, Series, _Frame, _concat
from ..base import tokenize
from ..context import _globals
from ..utils import digit, insert, M
def set_index(df, index, npartitions=None, shuffle=None, compute=True,
drop=True, upsample=1.0, **kwargs):
""" Set DataFrame index to new column
    Sorts the index and realigns the DataFrame to the new sorted order.
    This shuffles and repartitions your data. If done in parallel the
    resulting order is non-deterministic.
"""
if (isinstance(index, Series) and index._name == df.index._name):
return df
if isinstance(index, (DataFrame, tuple, list)):
raise NotImplementedError(
"Dask dataframe does not yet support multi-indexes.\n"
"You tried to index with this index: %s\n"
"Indexes must be single columns only." % str(index))
npartitions = npartitions or df.npartitions
if not isinstance(index, Series):
index2 = df[index]
else:
index2 = index
divisions = (index2
._repartition_quantiles(npartitions, upsample=upsample)
.compute()).tolist()
return set_partition(df, index, divisions, shuffle=shuffle, drop=drop,
**kwargs)
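# Illustrative sketch (added for clarity; not part of the original module):
# how the set_index machinery above is normally reached through the public
# dask.dataframe API. Column names and partition counts are invented.
def _example_set_index():
    import pandas as pd
    import dask.dataframe as dd
    pdf = pd.DataFrame({'x': [3, 1, 2, 5, 4], 'y': list(range(5))})
    ddf = dd.from_pandas(pdf, npartitions=2)
    # Approximate quantiles of 'x' choose the divisions; the data is then
    # shuffled so each output partition holds a contiguous range of 'x'.
    return ddf.set_index('x', shuffle='tasks')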
def set_partition(df, index, divisions, max_branch=32, drop=True, shuffle=None,
compute=None):
""" Group DataFrame by index
Sets a new index and partitions data along that index according to
divisions. Divisions are often found by computing approximate quantiles.
The function ``set_index`` will do both of these steps.
Parameters
----------
df: DataFrame/Series
Data that we want to re-partition
index: string or Series
Column to become the new index
divisions: list
Values to form new divisions between partitions
drop: bool, default True
Whether to delete columns to be used as the new index
shuffle: str (optional)
Either 'disk' for an on-disk shuffle or 'tasks' to use the task
scheduling framework. Use 'disk' if you are on a single machine
and 'tasks' if you are on a distributed cluster.
max_branch: int (optional)
If using the task-based shuffle, the amount of splitting each
partition undergoes. Increase this for fewer copies but more
scheduler overhead.
See Also
--------
set_index
shuffle
partd
"""
if np.isscalar(index):
partitions = df[index].map_partitions(set_partitions_pre,
divisions=divisions,
meta=pd.Series([0]))
df2 = df.assign(_partitions=partitions)
else:
partitions = index.map_partitions(set_partitions_pre,
divisions=divisions,
meta=pd.Series([0]))
df2 = df.assign(_partitions=partitions, _index=index)
df3 = rearrange_by_column(df2, '_partitions', max_branch=max_branch,
npartitions=len(divisions) - 1, shuffle=shuffle,
compute=compute)
if np.isscalar(index):
df4 = df3.map_partitions(set_index_post_scalar, index_name=index,
drop=drop, column_dtype=df.columns.dtype)
else:
df4 = df3.map_partitions(set_index_post_series, index_name=index.name,
drop=drop, column_dtype=df.columns.dtype)
df4.divisions = divisions
return df4.map_partitions(M.sort_index)
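# Illustrative sketch (added for clarity; not part of the original module):
# calling set_partition directly with precomputed divisions. The divisions
# list is invented; in practice it comes from the approximate quantiles
# computed in set_index above.
def _example_set_partition():
    import pandas as pd
    import dask.dataframe as dd
    pdf = pd.DataFrame({'x': [7, 2, 9, 4, 1, 6], 'y': list(range(6))})
    ddf = dd.from_pandas(pdf, npartitions=3)
    # With divisions [1, 5, 9], rows with 1 <= x < 5 land in the first output
    # partition and rows with 5 <= x <= 9 in the second (see set_partitions_pre).
    return set_partition(ddf, 'x', [1, 5, 9], shuffle='tasks')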
def shuffle(df, index, shuffle=None, npartitions=None, max_branch=32,
compute=None):
""" Group DataFrame by index
Hash grouping of elements. After this operation all elements that have
    the same index will be in the same partition. Note that this requires a
    full dataset read, serialization and shuffle. This is expensive; if
    possible you should avoid shuffles.
This does not preserve a meaningful index/partitioning scheme. This is not
deterministic if done in parallel.
See Also
--------
set_index
set_partition
shuffle_disk
shuffle_tasks
"""
if not isinstance(index, _Frame):
index = df[index]
partitions = index.map_partitions(partitioning_index,
npartitions=npartitions or df.npartitions,
meta=pd.Series([0]))
df2 = df.assign(_partitions=partitions)
df3 = rearrange_by_column(df2, '_partitions', npartitions=npartitions,
max_branch=max_branch, shuffle=shuffle,
compute=compute)
df4 = df3.drop('_partitions', axis=1, dtype=df.columns.dtype)
return df4
def rearrange_by_divisions(df, column, divisions, max_branch=None, shuffle=None):
""" Shuffle dataframe so that column separates along divisions """
partitions = df[column].map_partitions(set_partitions_pre,
divisions=divisions,
meta=pd.Series([0]))
df2 = df.assign(_partitions=partitions)
df3 = rearrange_by_column(df2, '_partitions', max_branch=max_branch,
npartitions=len(divisions) - 1, shuffle=shuffle)
return df3.drop('_partitions', axis=1, dtype=df.columns.dtype)
def rearrange_by_column(df, col, npartitions=None, max_branch=None,
shuffle=None, compute=None):
shuffle = shuffle or _globals.get('shuffle', 'disk')
if shuffle == 'disk':
return rearrange_by_column_disk(df, col, npartitions, compute=compute)
elif shuffle == 'tasks':
if npartitions is not None and npartitions < df.npartitions:
raise ValueError("Must create as many or more partitions in shuffle")
return rearrange_by_column_tasks(df, col, max_branch, npartitions)
else:
raise NotImplementedError("Unknown shuffle method %s" % shuffle)
class maybe_buffered_partd(object):
"""If serialized, will return non-buffered partd. Otherwise returns a
buffered partd"""
def __init__(self, buffer=True):
self.buffer = buffer
def __reduce__(self):
return (maybe_buffered_partd, (False,))
def __call__(self, *args, **kwargs):
import partd
if self.buffer:
return partd.PandasBlocks(partd.Buffer(partd.Dict(), partd.File()))
else:
return partd.PandasBlocks(partd.File())
def rearrange_by_column_disk(df, column, npartitions=None, compute=False):
""" Shuffle using local disk """
if npartitions is None:
npartitions = df.npartitions
token = tokenize(df, column, npartitions)
always_new_token = uuid.uuid1().hex
p = ('zpartd-' + always_new_token,)
dsk1 = {p: (maybe_buffered_partd(),)}
# Partition data on disk
name = 'shuffle-partition-' + always_new_token
dsk2 = {(name, i): (shuffle_group_3, key, column, npartitions, p)
for i, key in enumerate(df._keys())}
dsk = merge(df.dask, dsk1, dsk2)
if compute:
keys = [p, sorted(dsk2)]
pp, values = (_globals.get('get') or DataFrame._get)(dsk, keys)
dsk1 = {p: pp}
dsk = dict(zip(sorted(dsk2), values))
# Barrier
barrier_token = 'barrier-' + always_new_token
dsk3 = {barrier_token: (barrier, list(dsk2))}
# Collect groups
name = 'shuffle-collect-' + token
dsk4 = {(name, i): (collect, p, i, df._meta, barrier_token)
for i in range(npartitions)}
divisions = (None,) * (npartitions + 1)
dsk = merge(dsk, dsk1, dsk3, dsk4)
return DataFrame(dsk, name, df._meta, divisions)
def rearrange_by_column_tasks(df, column, max_branch=32, npartitions=None):
""" Order divisions of DataFrame so that all values within column align
This enacts a task-based shuffle
See also:
rearrange_by_column_disk
set_partitions_tasks
shuffle_tasks
"""
max_branch = max_branch or 32
n = df.npartitions
stages = int(math.ceil(math.log(n) / math.log(max_branch)))
if stages > 1:
k = int(math.ceil(n ** (1 / stages)))
else:
k = n
groups = []
splits = []
joins = []
inputs = [tuple(digit(i, j, k) for j in range(stages))
for i in range(k**stages)]
token = tokenize(df, column, max_branch)
start = dict((('shuffle-join-' + token, 0, inp),
(df._name, i) if i < df.npartitions else df._meta)
for i, inp in enumerate(inputs))
for stage in range(1, stages + 1):
group = dict((('shuffle-group-' + token, stage, inp),
(shuffle_group, ('shuffle-join-' + token, stage - 1, inp),
column, stage - 1, k, n))
for inp in inputs)
split = dict((('shuffle-split-' + token, stage, i, inp),
(getitem, ('shuffle-group-' + token, stage, inp), i))
for i in range(k)
for inp in inputs)
join = dict((('shuffle-join-' + token, stage, inp),
(_concat,
[('shuffle-split-' + token, stage, inp[stage - 1],
insert(inp, stage - 1, j)) for j in range(k)]))
for inp in inputs)
groups.append(group)
splits.append(split)
joins.append(join)
end = dict((('shuffle-' + token, i),
('shuffle-join-' + token, stages, inp))
for i, inp in enumerate(inputs))
dsk = merge(df.dask, start, end, *(groups + splits + joins))
df2 = DataFrame(dsk, 'shuffle-' + token, df, df.divisions)
if npartitions is not None and npartitions != df.npartitions:
parts = partitioning_index(pd.Series(range(npartitions)),
df.npartitions)
token = tokenize(df2, npartitions)
dsk = {('repartition-group-' + token, i): (shuffle_group_2, k, column)
for i, k in enumerate(df2._keys())}
for p in range(npartitions):
dsk[('repartition-get-' + token, p)] = \
(shuffle_group_get, ('repartition-group-' + token, parts[p]), p)
df3 = DataFrame(merge(df2.dask, dsk), 'repartition-get-' + token, df2,
[None] * (npartitions + 1))
else:
df3 = df2
df3.divisions = (None,) * (df.npartitions + 1)
return df3
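# Worked example (added for clarity; not part of the original module) of the
# stage/k arithmetic used by rearrange_by_column_tasks above: with n = 1000
# input partitions and max_branch = 32,
#   stages = ceil(log(1000) / log(32)) = ceil(1.99) = 2
#   k      = ceil(1000 ** (1/2))       = 32
# so each partition is split 32 ways in each of two stages instead of 1000
# ways at once, keeping the fan-out per stage bounded by max_branch.
def _example_stage_arithmetic(n=1000, max_branch=32):
    """Illustrative only: reproduce the stage/k computation used above."""
    stages = int(math.ceil(math.log(n) / math.log(max_branch)))   # -> 2
    k = int(math.ceil(n ** (1 / stages))) if stages > 1 else n    # -> 32
    return stages, k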
########################################################
# Various convenience functions to be run by the above #
########################################################
def partitioning_index(df, npartitions):
"""Computes a deterministic index mapping each record to a partition.
Identical rows are mapped to the same partition.
Parameters
----------
df : DataFrame/Series/Index
npartitions : int
The number of partitions to group into.
Returns
-------
partitions : ndarray
An array of int64 values mapping each record to a partition.
"""
if isinstance(df, (pd.Series, pd.Index)):
h = hash_series(df).astype('int64')
elif isinstance(df, pd.DataFrame):
cols = df.iteritems()
h = hash_series(next(cols)[1]).astype('int64')
for _, col in cols:
h = np.multiply(h, 3, h)
h = np.add(h, hash_series(col), h)
else:
raise TypeError("Unexpected type %s" % type(df))
return h % int(npartitions)
def hash_series(s):
"""Given a series, return a numpy array of deterministic integers."""
vals = s.values
dt = vals.dtype
if is_categorical_dtype(dt):
return vals.codes
elif np.issubdtype(dt, np.integer):
return vals
elif np.issubdtype(dt, np.floating):
return np.nan_to_num(vals).view('i' + str(dt.itemsize))
elif dt == np.bool:
return vals.view('int8')
elif np.issubdtype(dt, np.datetime64) or np.issubdtype(dt, np.timedelta64):
return vals.view('int64')
else:
return s.apply(hash).values
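# Worked example (added for clarity; not part of the original module): for an
# integer Series, hash_series returns the values unchanged, so
# partitioning_index reduces to "value mod npartitions":
#
#     partitioning_index(pd.Series([1, 2, 3, 4]), 3)
#     # -> array([1, 2, 0, 1])
#
# Float, bool, datetime and object columns are first converted to
# deterministic integers by hash_series above.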
def barrier(args):
list(args)
return 0
def collect(p, part, meta, barrier_token):
""" Collect partitions from partd, yield dataframes """
res = p.get(part)
return res if len(res) > 0 else meta
def set_partitions_pre(s, divisions):
partitions = pd.Series(divisions).searchsorted(s, side='right') - 1
partitions[(s >= divisions[-1]).values] = len(divisions) - 2
return partitions
def shuffle_group_2(df, col):
g = df.groupby(col)
return {i: g.get_group(i) for i in g.groups}, df.head(0)
def shuffle_group_get(g_head, i):
g, head = g_head
if i in g:
return g[i]
else:
return head
def shuffle_group(df, col, stage, k, npartitions):
ind = partitioning_index(df[col], npartitions)
c = ind // k ** stage % k
g = df.groupby(c)
return {i: g.get_group(i) if i in g.groups else df.head(0) for i in range(k)}
def shuffle_group_3(df, col, npartitions, p):
g = df.groupby(col)
d = {i: g.get_group(i) for i in g.groups}
p.append(d, fsync=True)
def set_index_post_scalar(df, index_name, drop, column_dtype):
df2 = df.drop('_partitions', axis=1).set_index(index_name, drop=drop)
df2.columns = df2.columns.astype(column_dtype)
return df2
def set_index_post_series(df, index_name, drop, column_dtype):
df2 = df.drop('_partitions', axis=1).set_index('_index', drop=True)
df2.index.name = index_name
df2.columns = df2.columns.astype(column_dtype)
return df2
| {
"repo_name": "jeffery-do/Vizdoombot",
"path": "doom/lib/python3.5/site-packages/dask/dataframe/shuffle.py",
"copies": "1",
"size": "13817",
"license": "mit",
"hash": 1825083798394446000,
"line_mean": 33.0320197044,
"line_max": 81,
"alpha_frac": 0.5939060578,
"autogenerated": false,
"ratio": 3.810535024820739,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4904441082620739,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import math
from operator import getitem
import uuid
import numpy as np
import pandas as pd
from toolz import merge
from .methods import drop_columns
from .core import DataFrame, Series, _Frame, _concat, map_partitions
from .hashing import hash_pandas_object
from .utils import PANDAS_VERSION
from .. import base
from ..base import tokenize, compute
from ..context import _globals
from ..delayed import delayed
from ..sizeof import sizeof
from ..utils import digit, insert, M
if PANDAS_VERSION >= '0.20.0':
from pandas._libs.algos import groupsort_indexer
else:
from pandas.algos import groupsort_indexer
def set_index(df, index, npartitions=None, shuffle=None, compute=False,
drop=True, upsample=1.0, divisions=None, **kwargs):
""" See _Frame.set_index for docstring """
if (isinstance(index, Series) and index._name == df.index._name):
return df
if isinstance(index, (DataFrame, tuple, list)):
raise NotImplementedError(
"Dask dataframe does not yet support multi-indexes.\n"
"You tried to index with this index: %s\n"
"Indexes must be single columns only." % str(index))
if npartitions == 'auto':
repartition = True
npartitions = max(100, df.npartitions)
else:
if npartitions is None:
npartitions = df.npartitions
repartition = False
if not isinstance(index, Series):
index2 = df[index]
else:
index2 = index
if divisions is None:
divisions = index2._repartition_quantiles(npartitions, upsample=upsample)
if repartition:
parts = df.to_delayed()
sizes = [delayed(sizeof)(part) for part in parts]
else:
sizes = []
iparts = index2.to_delayed()
mins = [ipart.min() for ipart in iparts]
maxes = [ipart.max() for ipart in iparts]
divisions, sizes, mins, maxes = base.compute(divisions, sizes, mins, maxes)
divisions = divisions.tolist()
if repartition:
total = sum(sizes)
npartitions = max(math.ceil(total / 128e6), 1)
npartitions = min(npartitions, df.npartitions)
n = len(divisions)
try:
divisions = np.interp(x=np.linspace(0, n - 1, npartitions + 1),
xp=np.linspace(0, n - 1, n),
fp=divisions).tolist()
except (TypeError, ValueError): # str type
indexes = np.linspace(0, n - 1, npartitions + 1).astype(int)
divisions = [divisions[i] for i in indexes]
if (mins == sorted(mins) and maxes == sorted(maxes) and
all(mx < mn for mx, mn in zip(maxes[:-1], mins[1:]))):
divisions = mins + [maxes[-1]]
result = set_sorted_index(df, index, drop=drop, divisions=divisions)
# There are cases where this still may not be sorted
# so sort_index to be sure. https://github.com/dask/dask/issues/2288
return result.map_partitions(M.sort_index)
return set_partition(df, index, divisions, shuffle=shuffle, drop=drop,
compute=compute, **kwargs)
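# Worked example (added for clarity; not part of the original module) of the
# npartitions='auto' branch above: partition sizes are sampled with sizeof()
# and the output partition count targets roughly 128 MB per partition, e.g.
# 1.0 GB of sampled data -> ceil(1e9 / 128e6) = 8 partitions, capped at the
# current df.npartitions so 'auto' never increases the partition count.
def _example_auto_npartitions(total_bytes=int(1e9), current_npartitions=20):
    """Illustrative only: reproduce the 'auto' partition-count arithmetic."""
    npartitions = max(math.ceil(total_bytes / 128e6), 1)
    return min(npartitions, current_npartitions)   # -> 8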
def set_partition(df, index, divisions, max_branch=32, drop=True, shuffle=None,
compute=None):
""" Group DataFrame by index
Sets a new index and partitions data along that index according to
divisions. Divisions are often found by computing approximate quantiles.
The function ``set_index`` will do both of these steps.
Parameters
----------
df: DataFrame/Series
Data that we want to re-partition
index: string or Series
Column to become the new index
divisions: list
Values to form new divisions between partitions
drop: bool, default True
Whether to delete columns to be used as the new index
shuffle: str (optional)
Either 'disk' for an on-disk shuffle or 'tasks' to use the task
scheduling framework. Use 'disk' if you are on a single machine
and 'tasks' if you are on a distributed cluster.
max_branch: int (optional)
If using the task-based shuffle, the amount of splitting each
partition undergoes. Increase this for fewer copies but more
scheduler overhead.
See Also
--------
set_index
shuffle
partd
"""
if np.isscalar(index):
partitions = df[index].map_partitions(set_partitions_pre,
divisions=divisions,
meta=pd.Series([0]))
df2 = df.assign(_partitions=partitions)
else:
partitions = index.map_partitions(set_partitions_pre,
divisions=divisions,
meta=pd.Series([0]))
df2 = df.assign(_partitions=partitions, _index=index)
df3 = rearrange_by_column(df2, '_partitions', max_branch=max_branch,
npartitions=len(divisions) - 1, shuffle=shuffle,
compute=compute)
if np.isscalar(index):
df4 = df3.map_partitions(set_index_post_scalar, index_name=index,
drop=drop, column_dtype=df.columns.dtype)
else:
df4 = df3.map_partitions(set_index_post_series, index_name=index.name,
drop=drop, column_dtype=df.columns.dtype)
df4.divisions = divisions
return df4.map_partitions(M.sort_index)
def shuffle(df, index, shuffle=None, npartitions=None, max_branch=32,
compute=None):
""" Group DataFrame by index
Hash grouping of elements. After this operation all elements that have
    the same index will be in the same partition. Note that this requires a
    full dataset read, serialization and shuffle. This is expensive; if
    possible you should avoid shuffles.
This does not preserve a meaningful index/partitioning scheme. This is not
deterministic if done in parallel.
See Also
--------
set_index
set_partition
shuffle_disk
shuffle_tasks
"""
if not isinstance(index, _Frame):
index = df[index]
partitions = index.map_partitions(partitioning_index,
npartitions=npartitions or df.npartitions,
meta=pd.Series([0]))
df2 = df.assign(_partitions=partitions)
df3 = rearrange_by_column(df2, '_partitions', npartitions=npartitions,
max_branch=max_branch, shuffle=shuffle,
compute=compute)
df4 = df3.map_partitions(drop_columns, '_partitions', df.columns.dtype)
return df4
def rearrange_by_divisions(df, column, divisions, max_branch=None, shuffle=None):
""" Shuffle dataframe so that column separates along divisions """
partitions = df[column].map_partitions(set_partitions_pre,
divisions=divisions,
meta=pd.Series([0]))
df2 = df.assign(_partitions=partitions)
df3 = rearrange_by_column(df2, '_partitions', max_branch=max_branch,
npartitions=len(divisions) - 1, shuffle=shuffle)
    df4 = df3.map_partitions(drop_columns, '_partitions', df.columns.dtype)
return df4
def rearrange_by_column(df, col, npartitions=None, max_branch=None,
shuffle=None, compute=None):
shuffle = shuffle or _globals.get('shuffle', 'disk')
if shuffle == 'disk':
return rearrange_by_column_disk(df, col, npartitions, compute=compute)
elif shuffle == 'tasks':
return rearrange_by_column_tasks(df, col, max_branch, npartitions)
else:
raise NotImplementedError("Unknown shuffle method %s" % shuffle)
class maybe_buffered_partd(object):
"""If serialized, will return non-buffered partd. Otherwise returns a
buffered partd"""
def __init__(self, buffer=True, tempdir=None):
self.tempdir = tempdir or _globals.get('temporary_directory')
self.buffer = buffer
def __reduce__(self):
if self.tempdir:
return (maybe_buffered_partd, (False, self.tempdir))
else:
return (maybe_buffered_partd, (False,))
def __call__(self, *args, **kwargs):
import partd
if self.tempdir:
file = partd.File(dir=self.tempdir)
else:
file = partd.File()
if self.buffer:
return partd.PandasBlocks(partd.Buffer(partd.Dict(), file))
else:
return partd.PandasBlocks(file)
def rearrange_by_column_disk(df, column, npartitions=None, compute=False):
""" Shuffle using local disk """
if npartitions is None:
npartitions = df.npartitions
token = tokenize(df, column, npartitions)
always_new_token = uuid.uuid1().hex
p = ('zpartd-' + always_new_token,)
dsk1 = {p: (maybe_buffered_partd(),)}
# Partition data on disk
name = 'shuffle-partition-' + always_new_token
dsk2 = {(name, i): (shuffle_group_3, key, column, npartitions, p)
for i, key in enumerate(df._keys())}
dsk = merge(df.dask, dsk1, dsk2)
if compute:
keys = [p, sorted(dsk2)]
pp, values = (_globals.get('get') or DataFrame._get)(dsk, keys)
dsk1 = {p: pp}
dsk = dict(zip(sorted(dsk2), values))
# Barrier
barrier_token = 'barrier-' + always_new_token
dsk3 = {barrier_token: (barrier, list(dsk2))}
# Collect groups
name = 'shuffle-collect-' + token
dsk4 = {(name, i): (collect, p, i, df._meta, barrier_token)
for i in range(npartitions)}
divisions = (None,) * (npartitions + 1)
dsk = merge(dsk, dsk1, dsk3, dsk4)
return DataFrame(dsk, name, df._meta, divisions)
def rearrange_by_column_tasks(df, column, max_branch=32, npartitions=None):
""" Order divisions of DataFrame so that all values within column align
This enacts a task-based shuffle
See also:
rearrange_by_column_disk
set_partitions_tasks
shuffle_tasks
"""
max_branch = max_branch or 32
n = df.npartitions
stages = int(math.ceil(math.log(n) / math.log(max_branch)))
if stages > 1:
k = int(math.ceil(n ** (1 / stages)))
else:
k = n
groups = []
splits = []
joins = []
inputs = [tuple(digit(i, j, k) for j in range(stages))
for i in range(k**stages)]
token = tokenize(df, column, max_branch)
start = dict((('shuffle-join-' + token, 0, inp),
(df._name, i) if i < df.npartitions else df._meta)
for i, inp in enumerate(inputs))
for stage in range(1, stages + 1):
group = dict((('shuffle-group-' + token, stage, inp),
(shuffle_group, ('shuffle-join-' + token, stage - 1, inp),
column, stage - 1, k, n))
for inp in inputs)
split = dict((('shuffle-split-' + token, stage, i, inp),
(getitem, ('shuffle-group-' + token, stage, inp), i))
for i in range(k)
for inp in inputs)
join = dict((('shuffle-join-' + token, stage, inp),
(_concat,
[('shuffle-split-' + token, stage, inp[stage - 1],
insert(inp, stage - 1, j)) for j in range(k)]))
for inp in inputs)
groups.append(group)
splits.append(split)
joins.append(join)
end = dict((('shuffle-' + token, i),
('shuffle-join-' + token, stages, inp))
for i, inp in enumerate(inputs))
dsk = merge(df.dask, start, end, *(groups + splits + joins))
df2 = DataFrame(dsk, 'shuffle-' + token, df, df.divisions)
if npartitions is not None and npartitions != df.npartitions:
parts = [i % df.npartitions for i in range(npartitions)]
token = tokenize(df2, npartitions)
dsk = {('repartition-group-' + token, i): (shuffle_group_2, k, column)
for i, k in enumerate(df2._keys())}
for p in range(npartitions):
dsk[('repartition-get-' + token, p)] = \
(shuffle_group_get, ('repartition-group-' + token, parts[p]), p)
df3 = DataFrame(merge(df2.dask, dsk), 'repartition-get-' + token, df2,
[None] * (npartitions + 1))
else:
df3 = df2
df3.divisions = (None,) * (df.npartitions + 1)
return df3
########################################################
# Various convenience functions to be run by the above #
########################################################
def partitioning_index(df, npartitions):
"""
Computes a deterministic index mapping each record to a partition.
Identical rows are mapped to the same partition.
Parameters
----------
df : DataFrame/Series/Index
npartitions : int
The number of partitions to group into.
Returns
-------
partitions : ndarray
An array of int64 values mapping each record to a partition.
"""
return hash_pandas_object(df, index=False) % int(npartitions)
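# Illustrative sketch (added for clarity; not part of the original module).
# The hash values produced by hash_pandas_object are implementation-defined,
# so only the shape of the call is shown, not concrete outputs:
#
#     parts = partitioning_index(pd.DataFrame({'x': [1, 2, 3]}), 4)
#     # one integer in range(4) per row; identical rows always map to the
#     # same value, which is what makes the shuffle deterministic.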
def barrier(args):
list(args)
return 0
def collect(p, part, meta, barrier_token):
""" Collect partitions from partd, yield dataframes """
res = p.get(part)
return res if len(res) > 0 else meta
def set_partitions_pre(s, divisions):
partitions = pd.Series(divisions).searchsorted(s, side='right') - 1
partitions[(s >= divisions[-1]).values] = len(divisions) - 2
return partitions
def shuffle_group_2(df, col):
if not len(df):
return {}, df
ind = df[col]._values.astype(np.int64)
n = ind.max() + 1
indexer, locations = groupsort_indexer(ind.view(np.int64), n)
df2 = df.take(indexer)
locations = locations.cumsum()
parts = [df2.iloc[a:b] for a, b in zip(locations[:-1], locations[1:])]
result2 = dict(zip(range(n), parts))
return result2, df.iloc[:0]
def shuffle_group_get(g_head, i):
g, head = g_head
if i in g:
return g[i]
else:
return head
def shuffle_group(df, col, stage, k, npartitions):
if col == '_partitions':
ind = df[col]
else:
ind = hash_pandas_object(df[col], index=False)
c = ind._values
typ = np.min_scalar_type(npartitions * 2)
c = c.astype(typ)
npartitions, k, stage = [np.array(x, dtype=np.min_scalar_type(x))[()]
for x in [npartitions, k, stage]]
c = np.mod(c, npartitions, out=c)
c = np.floor_divide(c, k ** stage, out=c)
c = np.mod(c, k, out=c)
indexer, locations = groupsort_indexer(c.astype(np.int64), k)
df2 = df.take(indexer)
locations = locations.cumsum()
parts = [df2.iloc[a:b] for a, b in zip(locations[:-1], locations[1:])]
return dict(zip(range(k), parts))
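# Worked example (added for clarity; not part of the original module) of the
# per-stage arithmetic in shuffle_group above, for a single hashed value:
#   hash value 123, npartitions = 9, k = 3
#   target partition = 123 % 9 = 6
#   stage 0 digit    = 6 // 3**0 % 3 = 0
#   stage 1 digit    = 6 // 3**1 % 3 = 2
# The target partition is decomposed into base-k digits (0, 2); each shuffle
# stage routes the row by one digit, so after both stages the row has reached
# partition 0 + 2*3 = 6.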
def shuffle_group_3(df, col, npartitions, p):
g = df.groupby(col)
d = {i: g.get_group(i) for i in g.groups}
p.append(d, fsync=True)
def set_index_post_scalar(df, index_name, drop, column_dtype):
df2 = df.drop('_partitions', axis=1).set_index(index_name, drop=drop)
df2.columns = df2.columns.astype(column_dtype)
return df2
def set_index_post_series(df, index_name, drop, column_dtype):
df2 = df.drop('_partitions', axis=1).set_index('_index', drop=True)
df2.index.name = index_name
df2.columns = df2.columns.astype(column_dtype)
return df2
def set_sorted_index(df, index, drop=True, divisions=None, **kwargs):
if not isinstance(index, Series):
meta = df._meta.set_index(index, drop=drop)
else:
meta = df._meta.set_index(index._meta, drop=drop)
result = map_partitions(M.set_index, df, index, drop=drop, meta=meta)
if not divisions:
divisions = compute_divisions(result, **kwargs)
elif len(divisions) != len(df.divisions):
msg = ("When doing `df.set_index(col, sorted=True, divisions=...)`, "
"divisions indicates known splits in the index column. In this "
"case divisions must be the same length as the existing "
"divisions in `df`\n\n"
"If the intent is to repartition into new divisions after "
"setting the index, you probably want:\n\n"
"`df.set_index(col, sorted=True).repartition(divisions=divisions)`")
raise ValueError(msg)
result.divisions = tuple(divisions)
return result
def compute_divisions(df, **kwargs):
mins = df.index.map_partitions(M.min, meta=df.index)
maxes = df.index.map_partitions(M.max, meta=df.index)
mins, maxes = compute(mins, maxes, **kwargs)
if (sorted(mins) != list(mins) or
sorted(maxes) != list(maxes) or
any(a > b for a, b in zip(mins, maxes))):
raise ValueError("Partitions must be sorted ascending with the index",
mins, maxes)
divisions = tuple(mins) + (list(maxes)[-1],)
return divisions
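# Worked example (added for clarity; not part of the original module): with
# three partitions whose index ranges are [0, 9], [10, 19] and [20, 29],
# compute_divisions returns
#     (0, 10, 20, 29)
# i.e. the per-partition minima plus the overall maximum. Overlapping or
# unsorted ranges raise ValueError instead, since divisions must be monotone.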
| {
"repo_name": "mraspaud/dask",
"path": "dask/dataframe/shuffle.py",
"copies": "1",
"size": "17192",
"license": "bsd-3-clause",
"hash": -597147565638344100,
"line_mean": 34.0142566191,
"line_max": 83,
"alpha_frac": 0.590739879,
"autogenerated": false,
"ratio": 3.7439024390243905,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9833522886631583,
"avg_score": 0.00022388627856148924,
"num_lines": 491
} |
from __future__ import (absolute_import, division, print_function)
import math
from sympy import Interval
from sympy.calculus.singularities import is_increasing, is_decreasing
from sympy.codegen.rewriting import Optimization
from sympy.core.function import UndefinedFunction
"""
This module collects classes useful for approximate rewriting of expressions.
This can be beneficial when generating numeric code for which performance is
of greater importance than precision (e.g. for preconditioners used in iterative
methods).
"""
class SumApprox(Optimization):
""" Approximates sum by neglecting small terms
If terms are expressions which can be determined to be monotonic, then
bounds for those expressions are added.
Parameters
==========
bounds : dict
Mapping expressions to length 2 tuple of bounds (low, high).
reltol : number
Threshold for when to ignore a term. Taken relative to the largest
lower bound among bounds.
Examples
========
>>> from sympy import exp
>>> from sympy.abc import x, y, z
>>> from sympy.codegen.rewriting import optimize
>>> from sympy.codegen.approximations import SumApprox
>>> bounds = {x: (-1, 1), y: (1000, 2000), z: (-10, 3)}
>>> sum_approx3 = SumApprox(bounds, reltol=1e-3)
>>> sum_approx2 = SumApprox(bounds, reltol=1e-2)
>>> sum_approx1 = SumApprox(bounds, reltol=1e-1)
>>> expr = 3*(x + y + exp(z))
>>> optimize(expr, [sum_approx3])
3*(x + y + exp(z))
>>> optimize(expr, [sum_approx2])
3*y + 3*exp(z)
>>> optimize(expr, [sum_approx1])
3*y
"""
def __init__(self, bounds, reltol, **kwargs):
super(SumApprox, self).__init__(**kwargs)
self.bounds = bounds
self.reltol = reltol
def __call__(self, expr):
return expr.factor().replace(self.query, lambda arg: self.value(arg))
def query(self, expr):
return expr.is_Add
def value(self, add):
for term in add.args:
if term.is_number or term in self.bounds or len(term.free_symbols) != 1:
continue
fs, = term.free_symbols
if fs not in self.bounds:
continue
intrvl = Interval(*self.bounds[fs])
if is_increasing(term, intrvl, fs):
self.bounds[term] = (
term.subs({fs: self.bounds[fs][0]}),
term.subs({fs: self.bounds[fs][1]})
)
elif is_decreasing(term, intrvl, fs):
self.bounds[term] = (
term.subs({fs: self.bounds[fs][1]}),
term.subs({fs: self.bounds[fs][0]})
)
else:
return add
if all(term.is_number or term in self.bounds for term in add.args):
bounds = [(term, term) if term.is_number else self.bounds[term] for term in add.args]
largest_abs_guarantee = 0
for lo, hi in bounds:
if lo <= 0 <= hi:
continue
largest_abs_guarantee = max(largest_abs_guarantee,
min(abs(lo), abs(hi)))
new_terms = []
for term, (lo, hi) in zip(add.args, bounds):
if max(abs(lo), abs(hi)) >= largest_abs_guarantee*self.reltol:
new_terms.append(term)
return add.func(*new_terms)
else:
return add
class SeriesApprox(Optimization):
""" Approximates functions by expanding them as a series
Parameters
==========
bounds : dict
Mapping expressions to length 2 tuple of bounds (low, high).
reltol : number
        Acceptable relative error of the series expansion, checked numerically
        at a few points across the bounds of the variable.
max_order : int
Largest order to include in series expansion
n_point_checks : int (even)
The validity of an expansion (with respect to reltol) is checked at
discrete points (linearly spaced over the bounds of the variable). The
number of points used in this numerical check is given by this number.
Examples
========
>>> from sympy import sin, pi
>>> from sympy.abc import x, y
>>> from sympy.codegen.rewriting import optimize
>>> from sympy.codegen.approximations import SeriesApprox
>>> bounds = {x: (-.1, .1), y: (pi-1, pi+1)}
>>> series_approx2 = SeriesApprox(bounds, reltol=1e-2)
>>> series_approx3 = SeriesApprox(bounds, reltol=1e-3)
>>> series_approx8 = SeriesApprox(bounds, reltol=1e-8)
>>> expr = sin(x)*sin(y)
>>> optimize(expr, [series_approx2])
x*(-y + (y - pi)**3/6 + pi)
>>> optimize(expr, [series_approx3])
(-x**3/6 + x)*sin(y)
>>> optimize(expr, [series_approx8])
sin(x)*sin(y)
"""
def __init__(self, bounds, reltol, max_order=4, n_point_checks=4, **kwargs):
super(SeriesApprox, self).__init__(**kwargs)
self.bounds = bounds
self.reltol = reltol
self.max_order = max_order
if n_point_checks % 2 == 1:
raise ValueError("Checking the solution at expansion point is not helpful")
self.n_point_checks = n_point_checks
self._prec = math.ceil(-math.log10(self.reltol))
def __call__(self, expr):
return expr.factor().replace(self.query, lambda arg: self.value(arg))
def query(self, expr):
return (expr.is_Function and not isinstance(expr, UndefinedFunction)
and len(expr.args) == 1)
def value(self, fexpr):
free_symbols = fexpr.free_symbols
if len(free_symbols) != 1:
return fexpr
symb, = free_symbols
if symb not in self.bounds:
return fexpr
lo, hi = self.bounds[symb]
x0 = (lo + hi)/2
cheapest = None
for n in range(self.max_order+1, 0, -1):
fseri = fexpr.series(symb, x0=x0, n=n).removeO()
n_ok = True
for idx in range(self.n_point_checks):
x = lo + idx*(hi - lo)/(self.n_point_checks - 1)
val = fseri.xreplace({symb: x})
ref = fexpr.xreplace({symb: x})
if abs((1 - val/ref).evalf(self._prec)) > self.reltol:
n_ok = False
break
if n_ok:
cheapest = fseri
else:
break
if cheapest is None:
return fexpr
else:
return cheapest
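# Illustrative sketch (added for clarity; not part of the original module):
# the two optimizations above can be combined in a single optimize() call.
# The bounds and tolerances below are invented, and the exact simplified
# output depends on them.
#
#     from sympy import exp, sin, pi
#     from sympy.abc import x, y
#     from sympy.codegen.rewriting import optimize
#     bounds = {x: (-0.1, 0.1), y: (pi - 1, pi + 1)}
#     approxs = [SumApprox(bounds, reltol=1e-2),
#                SeriesApprox(bounds, reltol=1e-2)]
#     cheap_expr = optimize(sin(x)*sin(y) + exp(x), approxs)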
| {
"repo_name": "kaushik94/sympy",
"path": "sympy/codegen/approximations.py",
"copies": "3",
"size": "6499",
"license": "bsd-3-clause",
"hash": -867784467218314000,
"line_mean": 34.1297297297,
"line_max": 97,
"alpha_frac": 0.5688567472,
"autogenerated": false,
"ratio": 3.7286288009179573,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5797485548117958,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import math
import cmath
import unittest
import numpy as np
from numpy import testing
from numpy.testing import assert_
import blaze
import datashape
from blaze.datadescriptor import dd_as_py
from blaze.py2help import skip
def assert_almost_equal(actual, desired, **kwargs):
return testing.assert_almost_equal(np.array(actual),
np.array(desired), **kwargs)
def assert_allclose(actual, desired, **kwargs):
return testing.assert_allclose(np.array(actual),
np.array(desired), **kwargs)
def assert_equal(actual, desired, **kwargs):
return testing.assert_equal(np.array(actual), np.array(desired), **kwargs)
def assert_array_equal(actual, desired, **kwargs):
return testing.assert_array_equal(np.array(actual),
np.array(desired), **kwargs)
# Many of these tests have been adapted from NumPy's test_umath.py test file
class TestBitwiseOps(unittest.TestCase):
def test_bitwise_or_bool(self):
t = blaze.array(True)
f = blaze.array(False)
self.assertEqual(dd_as_py((t | t)._data), True)
self.assertEqual(dd_as_py((t | f)._data), True)
self.assertEqual(dd_as_py((f | t)._data), True)
self.assertEqual(dd_as_py((f | f)._data), False)
def test_bitwise_or_uint64(self):
x, y = 0x3192573469a2b3a1, 0x9274a2e219c27638
a = blaze.array(x, 'uint64')
b = blaze.array(y, 'uint64')
self.assertEqual(dd_as_py((a | b)._data), x | y)
self.assertEqual(dd_as_py(blaze.bitwise_or(a, b)._data), x | y)
def test_bitwise_and_bool(self):
t = blaze.array(True)
f = blaze.array(False)
self.assertEqual(dd_as_py((t & t)._data), True)
self.assertEqual(dd_as_py((t & f)._data), False)
self.assertEqual(dd_as_py((f & t)._data), False)
self.assertEqual(dd_as_py((f & f)._data), False)
def test_bitwise_and_uint64(self):
x, y = 0x3192573469a2b3a1, 0x9274a2e219c27638
a = blaze.array(x, 'uint64')
b = blaze.array(y, 'uint64')
self.assertEqual(dd_as_py((a & b)._data), x & y)
self.assertEqual(dd_as_py(blaze.bitwise_and(a, b)._data), x & y)
def test_bitwise_xor_bool(self):
t = blaze.array(True)
f = blaze.array(False)
self.assertEqual(dd_as_py((t ^ t)._data), False)
self.assertEqual(dd_as_py((t ^ f)._data), True)
self.assertEqual(dd_as_py((f ^ t)._data), True)
self.assertEqual(dd_as_py((f ^ f)._data), False)
def test_bitwise_xor_uint64(self):
x, y = 0x3192573469a2b3a1, 0x9274a2e219c27638
a = blaze.array(x, 'uint64')
b = blaze.array(y, 'uint64')
self.assertEqual(dd_as_py((a ^ b)._data), x ^ y)
self.assertEqual(dd_as_py(blaze.bitwise_xor(a, b)._data), x ^ y)
def test_bitwise_not_bool(self):
t = blaze.array(True)
f = blaze.array(False)
self.assertEqual(dd_as_py((~t)._data), False)
self.assertEqual(dd_as_py((~f)._data), True)
def test_bitwise_not_uint64(self):
x = 0x3192573469a2b3a1
a = blaze.array(x, 'uint64')
self.assertEqual(dd_as_py((~a)._data), x ^ 0xffffffffffffffff)
self.assertEqual(dd_as_py(blaze.bitwise_not(a)._data),
x ^ 0xffffffffffffffff)
class TestPower(unittest.TestCase):
def test_power_float(self):
x = blaze.array([1., 2., 3.])
assert_equal(x**0, [1., 1., 1.])
assert_equal(x**1, x)
assert_equal(x**2, [1., 4., 9.])
assert_almost_equal(x**(-1), [1., 0.5, 1./3])
assert_almost_equal(x**(0.5), [1., math.sqrt(2), math.sqrt(3)])
def test_power_complex(self):
x = blaze.array([1+2j, 2+3j, 3+4j])
assert_equal(x**0, [1., 1., 1.])
assert_equal(x**1, x)
assert_almost_equal(x**2, [-3+4j, -5+12j, -7+24j])
assert_almost_equal(x**3, [(1+2j)**3, (2+3j)**3, (3+4j)**3])
assert_almost_equal(x**4, [(1+2j)**4, (2+3j)**4, (3+4j)**4])
assert_almost_equal(x**(-1), [1/(1+2j), 1/(2+3j), 1/(3+4j)])
assert_almost_equal(x**(-2), [1/(1+2j)**2, 1/(2+3j)**2, 1/(3+4j)**2])
assert_almost_equal(x**(-3), [(-11+2j)/125, (-46-9j)/2197,
(-117-44j)/15625])
assert_almost_equal(x**(0.5), [cmath.sqrt(1+2j), cmath.sqrt(2+3j),
cmath.sqrt(3+4j)])
norm = 1./((x**14)[0])
assert_almost_equal(x**14 * norm,
[i * norm for i in [-76443+16124j, 23161315+58317492j,
5583548873 + 2465133864j]])
def assert_complex_equal(x, y):
assert_array_equal(x.real, y.real)
assert_array_equal(x.imag, y.imag)
for z in [complex(0, np.inf), complex(1, np.inf)]:
z = blaze.array([z], dshape="complex[float64]")
assert_complex_equal(z**1, z)
assert_complex_equal(z**2, z*z)
assert_complex_equal(z**3, z*z*z)
def test_power_zero(self):
zero = blaze.array([0j])
one = blaze.array([1+0j])
cnan = blaze.array([complex(np.nan, np.nan)])
def assert_complex_equal(x, y):
x, y = np.array(x), np.array(y)
assert_array_equal(x.real, y.real)
assert_array_equal(x.imag, y.imag)
# positive powers
for p in [0.33, 0.5, 1, 1.5, 2, 3, 4, 5, 6.6]:
assert_complex_equal(blaze.power(zero, p), zero)
# zero power
assert_complex_equal(blaze.power(zero, 0), one)
assert_complex_equal(blaze.power(zero, 0+1j), cnan)
# negative power
for p in [0.33, 0.5, 1, 1.5, 2, 3, 4, 5, 6.6]:
assert_complex_equal(blaze.power(zero, -p), cnan)
assert_complex_equal(blaze.power(zero, -1+0.2j), cnan)
def test_fast_power(self):
x = blaze.array([1, 2, 3], dshape="int16")
self.assertEqual((x**2.00001).dshape, (x**2.0).dshape)
class TestLog(unittest.TestCase):
def test_log_values(self):
x = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024]
y = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
for ds in ['float32', 'float64']:
log2_ = 0.69314718055994530943
xf = blaze.array(x, dshape=ds)
yf = blaze.array(y, dshape=ds)*log2_
result = blaze.log(xf)
assert_almost_equal(result, yf)
class TestExp(unittest.TestCase):
def test_exp_values(self):
x = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024]
y = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
for ds in ['float32', 'float64']:
log2_ = 0.69314718055994530943
xf = blaze.array(x, dshape=ds)
yf = blaze.array(y, dshape=ds)*log2_
result = blaze.exp(yf)
assert_almost_equal(result, xf)
class TestLogAddExp(unittest.TestCase):
def test_logaddexp_values(self):
x = [1, 2, 3, 4, 5]
y = [5, 4, 3, 2, 1]
z = [6, 6, 6, 6, 6]
for ds, dec in zip(['float32', 'float64'], [6, 15]):
xf = blaze.log(blaze.array(x, dshape=ds))
yf = blaze.log(blaze.array(y, dshape=ds))
zf = blaze.log(blaze.array(z, dshape=ds))
result = blaze.logaddexp(xf, yf)
assert_almost_equal(result, zf, decimal=dec)
def test_logaddexp_range(self):
x = [1000000, -1000000, 1000200, -1000200]
y = [1000200, -1000200, 1000000, -1000000]
z = [1000200, -1000000, 1000200, -1000000]
for ds in ['float32', 'float64']:
logxf = blaze.array(x, dshape=ds)
logyf = blaze.array(y, dshape=ds)
logzf = blaze.array(z, dshape=ds)
result = blaze.logaddexp(logxf, logyf)
assert_almost_equal(result, logzf)
def test_inf(self):
inf = blaze.inf
x = [inf, -inf, inf, -inf, inf, 1, -inf, 1]
y = [inf, inf, -inf, -inf, 1, inf, 1, -inf]
z = [inf, inf, inf, -inf, inf, inf, 1, 1]
for ds in ['float32', 'float64']:
logxf = blaze.array(x, dshape=ds)
logyf = blaze.array(y, dshape=ds)
logzf = blaze.array(z, dshape=ds)
result = blaze.logaddexp(logxf, logyf)
assert_equal(result, logzf)
def test_nan(self):
self.assertTrue(blaze.isnan(blaze.logaddexp(blaze.nan, blaze.inf)))
self.assertTrue(blaze.isnan(blaze.logaddexp(blaze.inf, blaze.nan)))
self.assertTrue(blaze.isnan(blaze.logaddexp(blaze.nan, 0)))
self.assertTrue(blaze.isnan(blaze.logaddexp(0, blaze.nan)))
self.assertTrue(blaze.isnan(blaze.logaddexp(blaze.nan, blaze.nan)))
class TestLog2(unittest.TestCase):
def test_log2_values(self):
x = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024]
y = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
for ds in ['float32', 'float64']:
xf = blaze.array(x, dshape=ds)
yf = blaze.array(y, dshape=ds)
result = blaze.log2(xf)
assert_almost_equal(result, yf)
class TestLog10(unittest.TestCase):
def test_log10_values(self):
x = [1, 1e1, 1e2, 1e3, 1e4, 1e5, 1e6, 1e7, 1e8, 1e9, 1e10]
y = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
for ds in ['float32', 'float64']:
xf = blaze.array(x, dshape=ds)
yf = blaze.array(y, dshape=ds)
result = blaze.log10(xf)
assert_almost_equal(result, yf)
class TestExp2(unittest.TestCase):
def test_exp2_values(self):
x = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024]
y = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
for ds in ['float32', 'float64']:
xf = blaze.array(x, dshape=ds)
yf = blaze.array(y, dshape=ds)
result = blaze.exp2(yf)
assert_almost_equal(result, xf)
class TestLogAddExp2(unittest.TestCase):
# Need test for intermediate precisions
def test_logaddexp2_values(self):
x = [1, 2, 3, 4, 5]
y = [5, 4, 3, 2, 1]
z = [6, 6, 6, 6, 6]
        for ds, dec in zip(['float32', 'float64'], [6, 15]):
xf = blaze.log2(blaze.array(x, dshape=ds))
yf = blaze.log2(blaze.array(y, dshape=ds))
zf = blaze.log2(blaze.array(z, dshape=ds))
result = blaze.logaddexp2(xf, yf)
assert_almost_equal(result, zf, decimal=dec)
def test_logaddexp2_range(self):
x = [1000000, -1000000, 1000200, -1000200]
y = [1000200, -1000200, 1000000, -1000000]
z = [1000200, -1000000, 1000200, -1000000]
for ds in ['float32', 'float64']:
logxf = blaze.array(x, dshape=ds)
logyf = blaze.array(y, dshape=ds)
logzf = blaze.array(z, dshape=ds)
result = blaze.logaddexp2(logxf, logyf)
assert_almost_equal(result, logzf)
def test_inf(self):
inf = blaze.inf
x = [inf, -inf, inf, -inf, inf, 1, -inf, 1]
y = [inf, inf, -inf, -inf, 1, inf, 1, -inf]
z = [inf, inf, inf, -inf, inf, inf, 1, 1]
for ds in ['float32', 'float64']:
logxf = blaze.array(x, dshape=ds)
logyf = blaze.array(y, dshape=ds)
logzf = blaze.array(z, dshape=ds)
result = blaze.logaddexp2(logxf, logyf)
assert_equal(result, logzf)
def test_nan(self):
self.assertTrue(blaze.isnan(blaze.logaddexp2(blaze.nan, blaze.inf)))
self.assertTrue(blaze.isnan(blaze.logaddexp2(blaze.inf, blaze.nan)))
self.assertTrue(blaze.isnan(blaze.logaddexp2(blaze.nan, 0)))
self.assertTrue(blaze.isnan(blaze.logaddexp2(0, blaze.nan)))
self.assertTrue(blaze.isnan(blaze.logaddexp2(blaze.nan, blaze.nan)))
class TestRint(unittest.TestCase):
def test_rint(self):
a = blaze.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0])
b = blaze.array([-2., -2., -0., 0., 2., 2., 2.])
result = blaze.rint(a)
assert_equal(result, b)
class TestSign(unittest.TestCase):
def test_sign(self):
a = blaze.array([blaze.inf, -blaze.inf, blaze.nan, 0.0, 3.0, -3.0])
tgt = blaze.array([1., -1., blaze.nan, 0.0, 1.0, -1.0])
result = blaze.sign(a)
assert_equal(result, tgt)
class TestExpm1(unittest.TestCase):
def test_expm1(self):
assert_almost_equal(blaze.expm1(0.2), blaze.exp(0.2)-1)
assert_almost_equal(blaze.expm1(1e-6), blaze.exp(1e-6)-1)
class TestLog1p(unittest.TestCase):
def test_log1p(self):
assert_almost_equal(blaze.log1p(0.2), blaze.log(1.2))
assert_almost_equal(blaze.log1p(1e-6), blaze.log(1+1e-6))
class TestSqrt(unittest.TestCase):
def test_sqrt(self):
a = blaze.array([0., 9., 64., 1e20, 12345])
b = blaze.array([0., 3., 8., 1e10, math.sqrt(12345)])
result = blaze.sqrt(a)
assert_almost_equal(result, b)
class TestSquare(unittest.TestCase):
def test_square(self):
a = blaze.array([0., 3., 8., 1e10, math.sqrt(12345)])
b = blaze.array([0., 9., 64., 1e20, 12345])
result = blaze.square(a)
assert_almost_equal(result, b)
result = blaze.square(-a)
assert_almost_equal(result, b)
class TestReciprocal(unittest.TestCase):
def test_reciprocal(self):
a = blaze.array([1, 2., 3.33])
b = blaze.array([1., 0.5, 0.3003003])
result = blaze.reciprocal(a)
assert_almost_equal(result, b)
class TestAngles(unittest.TestCase):
def test_degrees(self):
assert_almost_equal(blaze.degrees(math.pi), 180.0)
assert_almost_equal(blaze.degrees(-0.5*math.pi), -90.0)
assert_almost_equal(blaze.rad2deg(math.pi), 180.0)
assert_almost_equal(blaze.rad2deg(-0.5*math.pi), -90.0)
def test_radians(self):
assert_almost_equal(blaze.radians(180.0), math.pi)
assert_almost_equal(blaze.radians(-90.0), -0.5*math.pi)
assert_almost_equal(blaze.deg2rad(180.0), math.pi)
assert_almost_equal(blaze.deg2rad(-90.0), -0.5*math.pi)
class TestMod(unittest.TestCase):
def test_remainder_mod_int(self):
a = blaze.array([-3, -2, -1, 0, 1, 2, 3])
a_mod_2 = blaze.array([1, 0, 1, 0, 1, 0, 1])
a_mod_3 = blaze.array([0, 1, 2, 0, 1, 2, 0])
assert_equal(blaze.remainder(a, 2), a_mod_2)
assert_equal(blaze.mod(a, 2), a_mod_2)
assert_equal(blaze.remainder(a, 3), a_mod_3)
assert_equal(blaze.mod(a, 3), a_mod_3)
def test_remainder_mod_float(self):
a = blaze.array([-3, -2, -1, 0, 1, 2, 3], dshape='float32')
a_mod_2 = blaze.array([1, 0, 1, 0, 1, 0, 1], dshape='float32')
a_mod_3 = blaze.array([0, 1, 2, 0, 1, 2, 0], dshape='float32')
assert_equal(blaze.remainder(a, 2), a_mod_2)
assert_equal(blaze.mod(a, 2), a_mod_2)
assert_equal(blaze.remainder(a, 3), a_mod_3)
assert_equal(blaze.mod(a, 3), a_mod_3)
def test_fmod_int(self):
a = blaze.array([-3, -2, -1, 0, 1, 2, 3])
a_fmod_2 = blaze.array([-1, 0, -1, 0, 1, 0, 1])
a_fmod_3 = blaze.array([0, -2, -1, 0, 1, 2, 0])
assert_equal(blaze.fmod(a, 2), a_fmod_2)
assert_equal(blaze.fmod(a, 3), a_fmod_3)
def test_fmod_float(self):
a = blaze.array([-3, -2, -1, 0, 1, 2, 3], dshape='float32')
a_fmod_2 = blaze.array([-1, 0, -1, 0, 1, 0, 1], dshape='float32')
a_fmod_3 = blaze.array([0, -2, -1, 0, 1, 2, 0], dshape='float32')
assert_equal(blaze.fmod(a, 2), a_fmod_2)
assert_equal(blaze.fmod(a, 3), a_fmod_3)
class TestAbs(unittest.TestCase):
def test_simple(self):
x = blaze.array([1+1j, 0+2j, 1+2j, blaze.inf, blaze.nan])
y_r = blaze.array([blaze.sqrt(2.), 2, blaze.sqrt(5),
blaze.inf, blaze.nan])
y = blaze.abs(x)
for i in range(len(x)):
assert_almost_equal(y[i], y_r[i])
def test_fabs(self):
# Test that blaze.abs(x +- 0j) == blaze.abs(x)
# (as mandated by C99 for cabs)
x = blaze.array([1+0j], dshape="complex[float64]")
assert_array_equal(blaze.abs(x), blaze.real(x))
x = blaze.array([complex(1, -0.)], dshape="complex[float64]")
assert_array_equal(blaze.abs(x), blaze.real(x))
x = blaze.array([complex(blaze.inf, -0.)], dshape="complex[float64]")
assert_array_equal(blaze.abs(x), blaze.real(x))
x = blaze.array([complex(blaze.nan, -0.)], dshape="complex[float64]")
assert_array_equal(blaze.abs(x), blaze.real(x))
def test_cabs_inf_nan(self):
# cabs(+-nan + nani) returns nan
self.assertTrue(blaze.isnan(blaze.abs(complex(blaze.nan, blaze.nan))))
self.assertTrue(blaze.isnan(blaze.abs(complex(-blaze.nan, blaze.nan))))
self.assertTrue(blaze.isnan(blaze.abs(complex(blaze.nan, -blaze.nan))))
self.assertTrue(blaze.isnan(blaze.abs(complex(-blaze.nan, -blaze.nan))))
        # According to the C99 standard, if exactly one of the real/imaginary
        # parts is inf and the other is nan, then cabs should return inf
assert_equal(blaze.abs(complex(blaze.inf, blaze.nan)), blaze.inf)
assert_equal(blaze.abs(complex(blaze.nan, blaze.inf)), blaze.inf)
assert_equal(blaze.abs(complex(-blaze.inf, blaze.nan)), blaze.inf)
assert_equal(blaze.abs(complex(blaze.nan, -blaze.inf)), blaze.inf)
values = [complex(blaze.nan, blaze.nan),
complex(-blaze.nan, blaze.nan),
complex(blaze.inf, blaze.nan),
complex(-blaze.inf, blaze.nan)]
for z in values:
abs_conj_z = blaze.abs(blaze.conj(z))
conj_abs_z = blaze.conj(blaze.abs(z))
abs_z = blaze.abs(z)
assert_equal(abs_conj_z, conj_abs_z)
assert_equal(abs_conj_z, abs_z)
assert_equal(conj_abs_z, abs_z)
class TestTrig(unittest.TestCase):
def test_sin(self):
a = blaze.array([0, math.pi/6, math.pi/3, 0.5*math.pi,
math.pi, 1.5*math.pi, 2*math.pi])
b = blaze.array([0, 0.5, 0.5*blaze.sqrt(3), 1, 0, -1, 0])
assert_allclose(blaze.sin(a), b, rtol=1e-15, atol=1e-15)
assert_allclose(blaze.sin(-a), -b, rtol=1e-15, atol=1e-15)
def test_cos(self):
a = blaze.array([0, math.pi/6, math.pi/3, 0.5*math.pi,
math.pi, 1.5*math.pi, 2*math.pi])
b = blaze.array([1, 0.5*blaze.sqrt(3), 0.5, 0, -1, 0, 1])
assert_allclose(blaze.cos(a), b, rtol=1e-15, atol=1e-15)
assert_allclose(blaze.cos(-a), b, rtol=1e-15, atol=1e-15)
def _check_branch_cut(f, x0, dx, re_sign=1, im_sign=-1, sig_zero_ok=False,
dtype=np.complex):
"""
Check for a branch cut in a function.
Assert that `x0` lies on a branch cut of function `f` and `f` is
continuous from the direction `dx`.
Parameters
----------
f : func
Function to check
x0 : array-like
Point on branch cut
dx : array-like
Direction to check continuity in
re_sign, im_sign : {1, -1}
Change of sign of the real or imaginary part expected
sig_zero_ok : bool
Whether to check if the branch cut respects signed zero (if applicable)
dtype : dtype
Dtype to check (should be complex)
"""
x0 = np.atleast_1d(x0).astype(dtype)
dx = np.atleast_1d(dx).astype(dtype)
scale = np.finfo(dtype).eps * 1e3
atol = 1e-4
y0 = f(x0)
yp = f(x0 + dx*scale*np.absolute(x0)/np.absolute(dx))
ym = f(x0 - dx*scale*np.absolute(x0)/np.absolute(dx))
assert_(np.all(np.absolute(y0.real - yp.real) < atol), (y0, yp))
assert_(np.all(np.absolute(y0.imag - yp.imag) < atol), (y0, yp))
assert_(np.all(np.absolute(y0.real - ym.real*re_sign) < atol), (y0, ym))
assert_(np.all(np.absolute(y0.imag - ym.imag*im_sign) < atol), (y0, ym))
if sig_zero_ok:
# check that signed zeros also work as a displacement
jr = (x0.real == 0) & (dx.real != 0)
ji = (x0.imag == 0) & (dx.imag != 0)
x = -x0
x.real[jr] = 0.*dx.real
x.imag[ji] = 0.*dx.imag
x = -x
ym = f(x)
ym = ym[jr | ji]
y0 = y0[jr | ji]
assert_(np.all(np.absolute(y0.real - ym.real*re_sign) < atol), (y0, ym))
assert_(np.all(np.absolute(y0.imag - ym.imag*im_sign) < atol), (y0, ym))
class TestComplexFunctions(unittest.TestCase):
funcs = [blaze.arcsin, blaze.arccos, blaze.arctan, blaze.arcsinh,
blaze.arccosh, blaze.arctanh, blaze.sin, blaze.cos, blaze.tan,
blaze.exp, blaze.exp2, blaze.log, blaze.sqrt, blaze.log10,
blaze.log2, blaze.log1p]
def test_it(self):
for f in self.funcs:
if f is blaze.arccosh:
x = 1.5
else:
x = .5
fr = f(x)
fz = f(complex(x))
assert_almost_equal(fz.real, fr, err_msg='real part %s' % f)
assert_almost_equal(fz.imag, 0., err_msg='imag part %s' % f)
def test_precisions_consistent(self):
z = 1 + 1j
for f in self.funcs:
fcf = f(blaze.array(z, dshape='complex[float32]'))
fcd = f(blaze.array(z, dshape='complex[float64]'))
assert_almost_equal(fcf, fcd, decimal=6, err_msg='fch-fcd %s' % f)
def test_branch_cuts(self):
# check branch cuts and continuity on them
_check_branch_cut(blaze.log, -0.5, 1j, 1, -1)
_check_branch_cut(blaze.log2, -0.5, 1j, 1, -1)
_check_branch_cut(blaze.log10, -0.5, 1j, 1, -1)
_check_branch_cut(blaze.log1p, -1.5, 1j, 1, -1)
_check_branch_cut(blaze.sqrt, -0.5, 1j, 1, -1)
_check_branch_cut(blaze.arcsin, [-2, 2], [1j, -1j], 1, -1)
_check_branch_cut(blaze.arccos, [-2, 2], [1j, -1j], 1, -1)
_check_branch_cut(blaze.arctan, [-2j, 2j], [1, -1], -1, 1)
_check_branch_cut(blaze.arcsinh, [-2j, 2j], [-1, 1], -1, 1)
_check_branch_cut(blaze.arccosh, [-1, 0.5], [1j, 1j], 1, -1)
_check_branch_cut(blaze.arctanh, [-2, 2], [1j, -1j], 1, -1)
# check against bogus branch cuts: assert continuity between quadrants
_check_branch_cut(blaze.arcsin, [-2j, 2j], [1, 1], 1, 1)
_check_branch_cut(blaze.arccos, [-2j, 2j], [1, 1], 1, 1)
_check_branch_cut(blaze.arctan, [-2, 2], [1j, 1j], 1, 1)
_check_branch_cut(blaze.arcsinh, [-2, 2, 0], [1j, 1j, 1], 1, 1)
_check_branch_cut(blaze.arccosh, [-2j, 2j, 2], [1, 1, 1j], 1, 1)
_check_branch_cut(blaze.arctanh, [-2j, 2j, 0], [1, 1, 1j], 1, 1)
@skip("These branch cuts are known to fail")
def test_branch_cuts_failing(self):
# XXX: signed zero not OK with ICC on 64-bit platform for log, see
# http://permalink.gmane.org/gmane.comp.python.numeric.general/25335
_check_branch_cut(blaze.log, -0.5, 1j, 1, -1, True)
_check_branch_cut(blaze.log2, -0.5, 1j, 1, -1, True)
_check_branch_cut(blaze.log10, -0.5, 1j, 1, -1, True)
_check_branch_cut(blaze.log1p, -1.5, 1j, 1, -1, True)
# XXX: signed zeros are not OK for sqrt or for the arc* functions
_check_branch_cut(blaze.sqrt, -0.5, 1j, 1, -1, True)
_check_branch_cut(blaze.arcsin, [-2, 2], [1j, -1j], 1, -1, True)
_check_branch_cut(blaze.arccos, [-2, 2], [1j, -1j], 1, -1, True)
_check_branch_cut(blaze.arctan, [-2j, 2j], [1, -1], -1, 1, True)
_check_branch_cut(blaze.arcsinh, [-2j, 2j], [-1, 1], -1, 1, True)
_check_branch_cut(blaze.arccosh, [-1, 0.5], [1j, 1j], 1, -1, True)
_check_branch_cut(blaze.arctanh, [-2, 2], [1j, -1j], 1, -1, True)
def test_against_cmath(self):
import cmath
points = [-1-1j, -1+1j, +1-1j, +1+1j]
name_map = {'arcsin': 'asin', 'arccos': 'acos', 'arctan': 'atan',
'arcsinh': 'asinh', 'arccosh': 'acosh', 'arctanh': 'atanh'}
atol = 4*np.finfo(np.complex).eps
for func in self.funcs:
fname = func.name
cname = name_map.get(fname, fname)
try:
cfunc = getattr(cmath, cname)
except AttributeError:
continue
for p in points:
a = complex(func(complex(p)))
b = cfunc(p)
self.assertTrue(abs(a - b) < atol,
"%s %s: %s; cmath: %s" % (fname, p, a, b))
class TestMaximum(unittest.TestCase):
def test_float_nans(self):
nan = blaze.nan
arg1 = blaze.array([0, nan, nan])
arg2 = blaze.array([nan, 0, nan])
out = blaze.array([nan, nan, nan])
assert_equal(blaze.maximum(arg1, arg2), out)
def test_complex_nans(self):
nan = blaze.nan
for cnan in [complex(nan, 0), complex(0, nan), complex(nan, nan)]:
arg1 = blaze.array([0, cnan, cnan])
arg2 = blaze.array([cnan, 0, cnan])
out = blaze.array([nan, nan, nan],
dshape=datashape.complex_float64)
assert_equal(blaze.maximum(arg1, arg2), out)
class TestMinimum(unittest.TestCase):
def test_float_nans(self):
nan = blaze.nan
arg1 = blaze.array([0, nan, nan])
arg2 = blaze.array([nan, 0, nan])
out = blaze.array([nan, nan, nan])
assert_equal(blaze.minimum(arg1, arg2), out)
def test_complex_nans(self):
nan = blaze.nan
for cnan in [complex(nan, 0), complex(0, nan), complex(nan, nan)]:
arg1 = blaze.array([0, cnan, cnan])
arg2 = blaze.array([cnan, 0, cnan])
out = blaze.array([nan, nan, nan],
dshape=datashape.complex_float64)
assert_equal(blaze.minimum(arg1, arg2), out)
class TestFmax(unittest.TestCase):
def test_float_nans(self):
nan = blaze.nan
arg1 = blaze.array([0, nan, nan])
arg2 = blaze.array([nan, 0, nan])
out = blaze.array([0, 0, nan])
assert_equal(blaze.fmax(arg1, arg2), out)
def test_complex_nans(self):
nan = blaze.nan
for cnan in [complex(nan, 0), complex(0, nan), complex(nan, nan)]:
arg1 = blaze.array([0, cnan, cnan])
arg2 = blaze.array([cnan, 0, cnan])
out = blaze.array([0, 0, nan],
dshape=datashape.complex_float64)
assert_equal(blaze.fmax(arg1, arg2), out)
class TestFmin(unittest.TestCase):
def test_float_nans(self):
nan = blaze.nan
arg1 = blaze.array([0, nan, nan])
arg2 = blaze.array([nan, 0, nan])
out = blaze.array([0, 0, nan])
assert_equal(blaze.fmin(arg1, arg2), out)
def test_complex_nans(self):
nan = blaze.nan
for cnan in [complex(nan, 0), complex(0, nan), complex(nan, nan)]:
arg1 = blaze.array([0, cnan, cnan])
arg2 = blaze.array([cnan, 0, cnan])
out = blaze.array([0, 0, nan], dshape=datashape.complex_float64)
assert_equal(blaze.fmin(arg1, arg2), out)
if __name__ == '__main__':
unittest.main()
| {
"repo_name": "mwiebe/blaze",
"path": "blaze/tests/test_numpy_ufunc_compat.py",
"copies": "1",
"size": "27233",
"license": "bsd-3-clause",
"hash": 4361230375844247600,
"line_mean": 38.3540462428,
"line_max": 80,
"alpha_frac": 0.5510593765,
"autogenerated": false,
"ratio": 2.714883860033895,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3765943236533895,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import math
import itertools
import operator
import pytest
from datetime import datetime, date
import datashape
from collections import Iterator, Iterable
import blaze
from blaze.compute.python import (nunique, mean, rrowfunc, rowfunc,
reduce_by_funcs, optimize)
from blaze import dshape
from blaze.compute.core import compute, compute_up, pre_compute
from blaze.expr import (symbol, by, merge, join, count, distinct,
                        Apply, sum, min, max, any, summary,
                        std, head, transform)
import numpy as np
from blaze import cos, sin
from blaze.compatibility import builtins
from blaze.utils import raises
t = symbol('t', 'var * {name: string, amount: int, id: int}')
data = [['Alice', 100, 1],
['Bob', 200, 2],
['Alice', 50, 3]]
tbig = symbol('tbig', 'var * {name: string, sex: string[1], amount: int, id: int}')
databig = [['Alice', 'F', 100, 1],
['Alice', 'F', 100, 3],
['Drew', 'F', 100, 4],
['Drew', 'M', 100, 5],
['Drew', 'M', 200, 5]]
def test_dispatched_rowfunc():
cw = optimize(t['amount'] + 100, [])
assert rowfunc(t)(t) == t
assert rowfunc(cw)(('Alice', 100, 1)) == 200
def test_reduce_by_funcs():
e = summary(number=t.id.max(), sum=t.amount.sum())
b = by(t, e)
assert reduce_by_funcs(b)[2]([1,2,3], [4,5,6]) == (1, 7)
def test_symbol():
assert compute(t, data) == data
def test_projection():
assert list(compute(t['name'], data)) == [x[0] for x in data]
def test_eq():
assert list(compute(t['amount'] == 100, data)) == [x[1] == 100 for x in data]
def test_selection():
assert list(compute(t[t['amount'] == 0], data)) == \
[x for x in data if x[1] == 0]
assert list(compute(t[t['amount'] > 150], data)) == \
[x for x in data if x[1] > 150]
def test_arithmetic():
assert list(compute(t['amount'] + t['id'], data)) == \
[b + c for a, b, c, in data]
assert list(compute(t['amount'] * t['id'], data)) == \
[b * c for a, b, c, in data]
assert list(compute(t['amount'] % t['id'], data)) == \
[b % c for a, b, c, in data]
def test_unary_ops():
for op in ('cos', 'sin', 'exp', 'ceil', 'floor', 'trunc', 'isnan'):
f = getattr(blaze, op)
pyf = getattr(math, op)
result = list(compute(f(t['amount']), data))
assert result == [pyf(x[1]) for x in data]
def test_neg():
expr = optimize(-t.amount, [])
assert list(compute(expr, data)) == [-x[1] for x in data]
def test_reductions():
assert compute(sum(t['amount']), data) == 100 + 200 + 50
assert compute(min(t['amount']), data) == 50
assert compute(max(t['amount']), data) == 200
assert compute(nunique(t['amount']), data) == 3
assert compute(nunique(t['name']), data) == 2
assert compute(count(t['amount']), data) == 3
assert compute(any(t['amount'] > 150), data) is True
assert compute(any(t['amount'] > 250), data) is False
assert compute(t.amount[0], data) == 100
assert compute(t.amount[-1], data) == 50
def test_1d_reductions_keepdims():
for r in [sum, min, max, nunique, count]:
assert compute(r(t.amount, keepdims=True), data) == \
(compute(r(t.amount), data),)
def test_count():
t = symbol('t', '3 * int')
assert compute(t.count(), [1, None, 2]) == 2
def reduction_runner(funcs):
from blaze.compatibility import builtins as bts
exprs = sum, min, max
for blaze_expr, py_func in itertools.product(exprs, funcs):
f = getattr(operator, py_func)
reduc_f = getattr(bts, blaze_expr.__name__)
ground_truth = f(reduc_f([100, 200, 50]), 5)
assert compute(f(blaze_expr(t['amount']), 5), data) == ground_truth
def test_reduction_arithmetic():
funcs = 'add', 'mul'
reduction_runner(funcs)
def test_reduction_compare():
funcs = 'eq', 'ne', 'lt', 'gt', 'le', 'ge'
reduction_runner(funcs)
def test_mean():
assert compute(mean(t['amount']), data) == float(100 + 200 + 50) / 3
assert 50 < compute(std(t['amount']), data) < 100
def test_std():
amt = [row[1] for row in data]
assert np.allclose(compute(t.amount.std(), data), np.std(amt))
assert np.allclose(compute(t.amount.std(unbiased=True), data),
np.std(amt, ddof=1))
assert np.allclose(compute(t.amount.var(), data), np.var(amt))
assert np.allclose(compute(t.amount.var(unbiased=True), data),
np.var(amt, ddof=1))
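# --- Added illustration (hypothetical helper, not part of the original suite) ---
# The `unbiased=True` branches above line up with NumPy's ddof=1: the summed
# squared deviations are divided by N - 1 rather than N. Spelled out with the
# same amounts as in `data`:
def _demo_unbiased_variance():
    import numpy as np
    amt = [100, 200, 50]
    n = len(amt)
    mu = sum(amt) / float(n)
    ss = sum((x - mu) ** 2 for x in amt)
    assert np.allclose(ss / n, np.var(amt))                # population, ddof=0
    assert np.allclose(ss / (n - 1), np.var(amt, ddof=1))  # sample, ddof=1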
def test_by_no_grouper():
names = t['name']
assert set(compute(by(names, count=names.count()), data)) == \
set([('Alice', 2), ('Bob', 1)])
def test_by_one():
print(compute(by(t['name'], total=t['amount'].sum()), data))
assert set(compute(by(t['name'], total=t['amount'].sum()), data)) == \
set([('Alice', 150), ('Bob', 200)])
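# --- Added illustration (hypothetical helper, not part of the original suite) ---
# The by(t['name'], total=t['amount'].sum()) expression above is a grouped
# reduction; a minimal pure-Python equivalent over the module-level `data`
# rows makes the expected pairs explicit:
def _demo_groupby_sum():
    totals = {}
    for name, amount, _id in data:
        totals[name] = totals.get(name, 0) + amount
    assert set(totals.items()) == set([('Alice', 150), ('Bob', 200)])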
def test_by_compound_apply():
print(compute(by(t['name'], total=(t['amount'] + 1).sum()), data))
assert set(compute(by(t['name'], total=(t['amount'] + 1).sum()), data)) == \
set([('Alice', 152), ('Bob', 201)])
def test_by_two():
result = compute(by(tbig[['name', 'sex']], total=tbig['amount'].sum()),
databig)
expected = [('Alice', 'F', 200),
('Drew', 'F', 100),
('Drew', 'M', 300)]
print(set(result))
assert set(result) == set(expected)
def test_by_three():
result = compute(by(tbig[['name', 'sex']],
total=(tbig['id'] + tbig['amount']).sum()),
databig)
expected = [('Alice', 'F', 204),
('Drew', 'F', 104),
('Drew', 'M', 310)]
print(result)
assert set(result) == set(expected)
def test_works_on_generators():
assert list(compute(t['amount'], iter(data))) == \
[x[1] for x in data]
assert list(compute(t['amount'], (i for i in data))) == \
[x[1] for x in data]
def test_join():
left = [['Alice', 100], ['Bob', 200]]
right = [['Alice', 1], ['Bob', 2]]
L = symbol('L', 'var * {name: string, amount: int}')
R = symbol('R', 'var * {name: string, id: int}')
joined = join(L, R, 'name')
assert dshape(joined.schema) == \
dshape('{name: string, amount: int, id: int}')
result = list(compute(joined, {L: left, R: right}))
expected = [('Alice', 100, 1), ('Bob', 200, 2)]
assert result == expected
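# --- Added illustration (hypothetical helper, not part of the original suite) ---
# An inner join over plain Python sequences, like the one tested above, is a
# build-and-probe hash join: index one side on the join column, then probe the
# index while scanning the other side:
def _demo_hash_join():
    left = [['Alice', 100], ['Bob', 200]]
    right = [['Alice', 1], ['Bob', 2]]
    index = dict((row[0], row[1]) for row in right)
    joined = [(name, amount, index[name])
              for name, amount in left if name in index]
    assert joined == [('Alice', 100, 1), ('Bob', 200, 2)]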
def test_outer_join():
left = [(1, 'Alice', 100),
(2, 'Bob', 200),
(4, 'Dennis', 400)]
right = [('NYC', 1),
('Boston', 1),
('LA', 3),
('Moscow', 4)]
L = symbol('L', 'var * {id: int, name: string, amount: real}')
R = symbol('R', 'var * {city: string, id: int}')
assert set(compute(join(L, R), {L: left, R: right})) == set(
[(1, 'Alice', 100, 'NYC'),
(1, 'Alice', 100, 'Boston'),
(4, 'Dennis', 400, 'Moscow')])
assert set(compute(join(L, R, how='left'), {L: left, R: right})) == set(
[(1, 'Alice', 100, 'NYC'),
(1, 'Alice', 100, 'Boston'),
(2, 'Bob', 200, None),
(4, 'Dennis', 400, 'Moscow')])
assert set(compute(join(L, R, how='right'), {L: left, R: right})) == set(
[(1, 'Alice', 100, 'NYC'),
(1, 'Alice', 100, 'Boston'),
(3, None, None, 'LA'),
(4, 'Dennis', 400, 'Moscow')])
assert set(compute(join(L, R, how='outer'), {L: left, R: right})) == set(
[(1, 'Alice', 100, 'NYC'),
(1, 'Alice', 100, 'Boston'),
(2, 'Bob', 200, None),
(3, None, None, 'LA'),
(4, 'Dennis', 400, 'Moscow')])
def test_multi_column_join():
left = [(1, 2, 3),
(2, 3, 4),
(1, 3, 5)]
right = [(1, 2, 30),
(1, 3, 50),
(1, 3, 150)]
L = symbol('L', 'var * {x: int, y: int, z: int}')
R = symbol('R', 'var * {x: int, y: int, w: int}')
j = join(L, R, ['x', 'y'])
print(list(compute(j, {L: left, R: right})))
assert list(compute(j, {L: left, R: right})) == [(1, 2, 3, 30),
(1, 3, 5, 50),
(1, 3, 5, 150)]
@pytest.mark.xfail(reason="This doesn't necessarily make sense")
def test_column_of_column():
assert list(compute(t['name']['name'], data)) == \
list(compute(t['name'], data))
def test_distinct():
assert set(compute(distinct(t['name']), data)) == set(['Alice', 'Bob'])
assert set(compute(distinct(t), data)) == set(map(tuple, data))
e = distinct(t)
assert list(compute(e, [])) == []
def test_distinct_count():
t2 = t['name'].distinct()
gby = by(t2, total=t2.count())
result = set(compute(gby, data))
assert result == set([('Alice', 1), ('Bob', 1)])
def test_sort():
assert list(compute(t.sort('amount'), data)) == \
sorted(data, key=lambda x: x[1], reverse=False)
assert list(compute(t.sort('amount', ascending=True), data)) == \
sorted(data, key=lambda x: x[1], reverse=False)
assert list(compute(t.sort(['amount', 'id']), data)) == \
sorted(data, key=lambda x: (x[1], x[2]), reverse=False)
def test_fancy_sort():
assert list(compute(t.sort(t['amount']), data)) ==\
list(compute(t.sort('amount'), data))
assert list(compute(t.sort(t[['amount', 'id']]), data)) ==\
list(compute(t.sort(['amount', 'id']), data))
assert list(compute(t.sort(0-t['amount']), data)) ==\
list(compute(t.sort('amount'), data))[::-1]
def test_sort_on_column():
assert list(compute(t.name.distinct().sort('name'), data)) == \
['Alice', 'Bob']
def test_head():
assert list(compute(t.head(1), data)) == [data[0]]
e = head(t, 101)
p = list(range(1000))
assert len(list(compute(e, p))) == 101
def test_graph_double_join():
idx = [['A', 1],
['B', 2],
['C', 3],
['D', 4],
['E', 5],
['F', 6]]
arc = [[1, 3],
[2, 3],
[4, 3],
[5, 3],
[3, 1],
[2, 1],
[5, 1],
[1, 6],
[2, 6],
[4, 6]]
wanted = [['A'],
['F']]
t_idx = symbol('t_idx', 'var * {name: string, b: int32}')
t_arc = symbol('t_arc', 'var * {a: int32, b: int32}')
t_wanted = symbol('t_wanted', 'var * {name: string}')
j = join(join(t_idx, t_arc, 'b'), t_wanted, 'name')[['name', 'b', 'a']]
result = compute(j, {t_idx: idx, t_arc: arc, t_wanted: wanted})
result = sorted(map(tuple, result))
expected = sorted([('A', 3, 1),
('A', 2, 1),
('A', 5, 1),
('F', 1, 6),
('F', 2, 6),
('F', 4, 6)])
assert result == expected
def test_label():
assert list(compute((t['amount'] * 1).label('foo'), data)) == \
list(compute((t['amount'] * 1), data))
def test_relabel_join():
names = symbol('names', 'var * {first: string, last: string}')
siblings = join(names.relabel({'first': 'left'}),
names.relabel({'first': 'right'}),
'last')[['left', 'right']]
data = [('Alice', 'Smith'),
('Bob', 'Jones'),
('Charlie', 'Smith')]
print(set(compute(siblings, {names: data})))
assert ('Alice', 'Charlie') in set(compute(siblings, {names: data}))
assert ('Alice', 'Bob') not in set(compute(siblings, {names: data}))
def test_map_column():
inc = lambda x: x + 1
assert list(compute(t['amount'].map(inc, 'int'), data)) == [x[1] + 1 for x in data]
def test_map():
assert (list(compute(t.map(lambda tup: tup[1] + tup[2], 'int'), data)) ==
[x[1] + x[2] for x in data])
def test_apply_column():
result = compute(t.amount.apply(builtins.sum, 'real'), data)
expected = compute(t.amount.sum(), data)
assert result == expected
def test_apply():
data2 = tuple(map(tuple, data))
assert compute(t.apply(hash, 'int'), data2) == hash(data2)
def test_map_datetime():
from datetime import datetime
data = [['A', 0], ['B', 1]]
t = symbol('t', 'var * {foo: string, datetime: int64}')
result = list(compute(t['datetime'].map(datetime.utcfromtimestamp,
'datetime'), data))
expected = [datetime(1970, 1, 1, 0, 0, 0), datetime(1970, 1, 1, 0, 0, 1)]
assert result == expected
def test_by_multi_column_grouper():
t = symbol('t', 'var * {x: int, y: int, z: int}')
expr = by(t[['x', 'y']], total=t['z'].count())
data = [(1, 2, 0), (1, 2, 0), (1, 1, 0)]
print(set(compute(expr, data)))
assert set(compute(expr, data)) == set([(1, 2, 2), (1, 1, 1)])
def test_merge():
col = (t['amount'] * 2).label('new')
expr = merge(t['name'], col)
assert list(compute(expr, data)) == [(row[0], row[1] * 2) for row in data]
def test_transform():
expr = transform(t, x=t.amount / t.id)
assert list(compute(expr, data)) == [('Alice', 100, 1, 100),
('Bob', 200, 2, 100),
('Alice', 50, 3, 50 / 3)]
def test_map_columnwise():
colwise = t['amount'] * t['id']
expr = colwise.map(lambda x: x / 10, 'int64', name='mod')
assert list(compute(expr, data)) == [((row[1]*row[2]) / 10) for row in data]
def test_map_columnwise_of_selection():
tsel = t[t['name'] == 'Alice']
colwise = tsel['amount'] * tsel['id']
expr = colwise.map(lambda x: x / 10, 'int64', name='mod')
assert list(compute(expr, data)) == [((row[1]*row[2]) / 10) for row in data[::2]]
def test_selection_out_of_order():
expr = t['name'][t['amount'] < 100]
assert list(compute(expr, data)) == ['Alice']
def test_recursive_rowfunc():
f = rrowfunc(t['name'], t)
assert [f(row) for row in data] == [row[0] for row in data]
expr = optimize(t['amount'] + t['id'], [])
f = rrowfunc(expr, t)
assert [f(row) for row in data] == [row[1] + row[2] for row in data]
assert raises(Exception, lambda: rrowfunc(t[t['amount'] < 0]['name'], t))
def test_recursive_rowfunc_is_used():
expr = by(t['name'], total=(2 * (t['amount'] + t['id'])).sum())
expected = [('Alice', 2*(101 + 53)),
('Bob', 2*(202))]
assert set(compute(expr, data)) == set(expected)
class TestFunctionExpressions(object):
def test_compound(self):
s = t.amount.mean()
r = compute(s, data)
assert isinstance(r, float)
expr = cos(s) ** 2 + sin(s) ** 2
result = compute(expr, data)
expected = math.cos(r) ** 2 + math.sin(r) ** 2
assert result == expected
def test_user_defined_function(self):
s = t.amount.count()
r = compute(s, data)
assert isinstance(r, int)
def myfunc(x):
return (cos(x) + sin(x)) ** 2 / math.pi
result = compute(myfunc(s), data)
expected = (math.cos(r) + math.sin(r)) ** 2 / math.pi
assert result == expected
def test_user_defined_calls(self):
s = t.amount.count()
r = compute(s, data)
def myother(y):
return 2 + y ** 10
def myfunc(x):
return myother((cos(x) + sin(x)) ** 2 / math.pi)
result = compute(myfunc(s), data)
expected = myother((math.cos(r) + math.sin(r)) ** 2 / math.pi)
assert result == expected
def test_by_groupby_deep():
data = [(1, 2, 'Alice'),
(1, 3, 'Bob'),
(2, 4, 'Alice'),
(2, 4, '')]
schema = '{x: int, y: int, name: string}'
t = symbol('t', datashape.var * schema)
t2 = t[t['name'] != '']
t3 = merge(t2.x, t2.name)
expr = by(t3.name, avg=t3.x.mean())
result = set(compute(expr, data))
assert result == set([('Alice', 1.5), ('Bob', 1.0)])
def test_by_then_sort_dict_items_sequence():
expr = by(tbig.name, total=tbig.amount.sum()).sort('name')
assert compute(expr, databig)
def test_summary():
expr = summary(count=t.id.count(), sum=t.amount.sum())
assert compute(expr, data) == (3, 350)
assert compute(expr, iter(data)) == (3, 350)
def test_summary_keepdims():
assert compute(summary(count=t.id.count(), sum=t.amount.sum(),
keepdims=True), data) == \
(compute(summary(count=t.id.count(), sum=t.amount.sum(),
keepdims=False), data),)
def test_summary_by():
expr = by(t.name, summary(count=t.id.count(), sum=t.amount.sum()))
assert set(compute(expr, data)) == set([('Alice', 2, 150),
('Bob', 1, 200)])
expr = by(t.name, summary(count=t.id.count(), sum=(t.amount + 1).sum()))
assert set(compute(expr, data)) == set([('Alice', 2, 152),
('Bob', 1, 201)])
expr = by(t.name, summary(count=t.id.count(), sum=t.amount.sum() + 1))
assert set(compute(expr, data)) == set([('Alice', 2, 151),
('Bob', 1, 201)])
def test_summary_by_first():
expr = by(t.name, amt=t.amount[0])
assert set(compute(expr, data)) == set((('Bob', 200), ('Alice', 100)))
def test_summary_by_last():
expr = by(t.name, amt=t.amount[-1])
assert set(compute(expr, data)) == set((('Bob', 200), ('Alice', 50)))
def test_reduction_arithmetic():
expr = t.amount.sum() + 1
assert compute(expr, data) == 351
def test_scalar_arithmetic():
x = symbol('x', 'real')
y = symbol('y', 'real')
assert compute(x + y, {x: 2, y: 3}) == 5
assert compute_up(x + y, 2, 3) == 5
assert compute_up(x * y, 2, 3) == 6
assert compute_up(x / y, 6, 3) == 2
assert compute_up(x % y, 4, 3) == 1
assert compute_up(x ** y, 4, 3) == 64
assert compute(x + 1, {x: 2}) == 3
assert compute(x * 2, {x: 2}) == 4
assert compute(1 + x, {x: 2}) == 3
assert compute(2 * x, {x: 2}) == 4
assert compute_up(-x, 1) == -1
assert compute_up(blaze.sin(x), 1) == math.sin(1)
def test_like():
t = symbol('t', 'var * {name: string, city: string}')
data = [('Alice Smith', 'New York'),
('Bob Smith', 'Chicago'),
('Alice Walker', 'LA')]
assert list(compute(t.like(name='Alice*'), data)) == [data[0], data[2]]
assert list(compute(t.like(name='lice*'), data)) == []
assert list(compute(t.like(name='*Smith*'), data)) == [data[0], data[1]]
assert list(compute(t.like(name='*Smith*', city='New York'), data)) == [data[0]]
def test_datetime_comparison():
data = [['Alice', date(2000, 1, 1)],
['Bob', date(2000, 2, 2)],
['Alice', date(2000, 3, 3)]]
t = symbol('t', 'var * {name: string, when: date}')
assert list(compute(t[t.when > '2000-01-01'], data)) == data[1:]
def test_datetime_access():
data = [['Alice', 100, 1, datetime(2000, 1, 1, 1, 1, 1)],
['Bob', 200, 2, datetime(2000, 1, 1, 1, 1, 1)],
['Alice', 50, 3, datetime(2000, 1, 1, 1, 1, 1)]]
t = symbol('t',
'var * {amount: float64, id: int64, name: string, when: datetime}')
assert list(compute(t.when.year, data)) == [2000, 2000, 2000]
assert list(compute(t.when.second, data)) == [1, 1, 1]
assert list(compute(t.when.date, data)) == [date(2000, 1, 1)] * 3
def test_utcfromtimestamp():
t = symbol('t', '1 * int64')
assert list(compute(t.utcfromtimestamp, [0])) == \
[datetime(1970, 1, 1, 0, 0)]
payments = [{'name': 'Alice', 'payments': [
{'amount': 100, 'when': datetime(2000, 1, 1, 1, 1 ,1)},
{'amount': 200, 'when': datetime(2000, 2, 2, 2, 2, 2)}
]},
{'name': 'Bob', 'payments': [
{'amount': 300, 'when': datetime(2000, 3, 3, 3, 3 ,3)},
{'amount': -400, 'when': datetime(2000, 4, 4, 4, 4, 4)},
{'amount': 500, 'when': datetime(2000, 5, 5, 5, 5, 5)}
]},
]
payments_ordered = [('Alice', [( 100, datetime(2000, 1, 1, 1, 1 ,1)),
( 200, datetime(2000, 2, 2, 2, 2, 2))]),
('Bob', [( 300, datetime(2000, 3, 3, 3, 3 ,3)),
(-400, datetime(2000, 4, 4, 4, 4, 4)),
( 500, datetime(2000, 5, 5, 5, 5, 5))])]
payment_dshape = 'var * {name: string, payments: var * {amount: int32, when: datetime}}'
@pytest.mark.xfail(reason="Can't reason about nested broadcasts yet")
def test_nested():
t = symbol('t', payment_dshape)
assert list(compute(t.name, payments_ordered)) == ['Alice', 'Bob']
assert list(compute(t.payments, payments_ordered)) == \
[p[1] for p in payments_ordered]
assert list(compute(t.payments.amount, payments_ordered)) == \
[(100, 200), (300, -400, 500)]
assert list(compute(t.payments.amount + 1, payments_ordered)) ==\
[(101, 201), (301, -399, 501)]
@pytest.mark.xfail(reason="Can't reason about nested broadcasts yet")
def test_scalar():
s = symbol('s', '{name: string, id: int32, payments: var * {amount: int32, when: datetime}}')
data = ('Alice', 1, ((100, datetime(2000, 1, 1, 1, 1 ,1)),
(200, datetime(2000, 2, 2, 2, 2, 2)),
(300, datetime(2000, 3, 3, 3, 3, 3))))
assert compute(s.name, data) == 'Alice'
assert compute(s.id + 1, data) == 2
assert tuple(compute(s.payments.amount, data)) == (100, 200, 300)
assert tuple(compute(s.payments.amount + 1, data)) == (101, 201, 301)
def test_slice():
assert compute(t[0], data) == data[0]
assert list(compute(t[:2], data)) == list(data[:2])
assert list(compute(t.name[:2], data)) == [data[0][0], data[1][0]]
def test_negative_slicing():
assert list(compute(t[-1:], data)) == data[-1:]
assert list(compute(t[-1:], iter(data))) == data[-1:]
assert list(compute(t[-1], data)) == data[-1]
assert list(compute(t[-1], iter(data))) == data[-1]
assert list(compute(t[-2], data)) == data[-2]
assert list(compute(t[-2], iter(data))) == data[-2]
@pytest.mark.xfail(raises=ValueError,
reason="No support for stop and step having negative values")
def test_negative_slicing_raises_on_stop_and_step_not_None():
assert list(compute(t[-2:-5:-1], data)) == data[-2:-5:-1]
def test_multi_dataset_broadcast():
x = symbol('x', '3 * int')
y = symbol('y', '3 * int')
a = [1, 2, 3]
b = [10, 20, 30]
assert list(compute(x + y, {x: a, y: b})) == [11, 22, 33]
assert list(compute(2*x + (y + 1), {x: a, y: b})) == [13, 25, 37]
@pytest.mark.xfail(reason="Optimize doesn't create multi-table-broadcasts")
def test_multi_dataset_broadcast_with_Record_types():
x = symbol('x', '3 * {p: int, q: int}')
y = symbol('y', '3 * int')
a = [(1, 1), (2, 2), (3, 3)]
b = [10, 20, 30]
assert list(compute(x.p + x.q + y, {x: iter(a), y: iter(b)})) == [12, 24, 36]
def eq(a, b):
if isinstance(a, (Iterable, Iterator)):
a = list(a)
if isinstance(b, (Iterable, Iterator)):
b = list(b)
return a == b
def test_pre_compute():
s = symbol('s', 'var * {a: int, b: int}')
assert pre_compute(s, [(1, 2)]) == [(1, 2)]
assert list(pre_compute(s, iter([(1, 2)]))) == [(1, 2)]
assert list(pre_compute(s, iter([(1, 2), (3, 4)]))) == [(1, 2), (3, 4)]
assert list(pre_compute(s, iter([{'a': 1, 'b': 2},
{'a': 3, 'b': 4}]))) == [(1, 2), (3, 4)]
def test_dicts():
t = symbol('t', 'var * {name: string, amount: int, id: int}')
L = [['Alice', 100, 1],
['Bob', 200, 2],
['Alice', 50, 3]]
d = [{'name': 'Alice', 'amount': 100, 'id': 1},
{'name': 'Bob', 'amount': 200, 'id': 2},
{'name': 'Alice', 'amount': 50, 'id': 3}]
assert list(pre_compute(t, d)) == list(map(tuple, L))
for expr in [t.amount, t.amount.sum(), by(t.name, sum=t.amount.sum())]:
assert eq(compute(expr, {t: L}),
compute(expr, {t: d}))
for expr in [t.amount, t.amount.sum(), by(t.name, sum=t.amount.sum())]:
assert eq(compute(expr, {t: iter(L)}),
compute(expr, {t: iter(d)}))
assert eq(compute(expr, {t: iter(L)}),
compute(expr, {t: L}))
def test_nelements_list_tuple():
assert compute(t.nelements(), data) == len(data)
def test_nelements_iterator():
x = (row for row in data)
assert compute(t.nelements(), x) == len(data)
def test_nrows():
assert compute(t.nrows, data) == len(data)
x = (row for row in data)
assert compute(t.nrows, x) == len(data)
@pytest.mark.xfail(raises=Exception, reason="Only 1D reductions allowed")
def test_nelements_2D():
assert compute(t.nelements(axis=1), data) == len(data[0])
def test_compute_field_on_dicts():
s = symbol('s', '{x: 3 * int, y: 3 * int}')
d = {'x': [1, 2, 3], 'y': [4, 5, 6]}
assert compute(s.x, {s: d}) == [1, 2, 3]
def test_truncate():
s = symbol('x', 'real')
assert compute(s.truncate(20), 154) == 140
assert compute(s.truncate(0.1), 3.1415) == 3.1
def test_truncate_datetime():
s = symbol('x', 'datetime')
assert compute(s.truncate(2, 'days'), datetime(2002, 1, 3, 12, 30)) ==\
date(2002, 1, 2)
s = symbol('x', 'var * datetime')
assert list(compute(s.truncate(2, 'days'),
[datetime(2002, 1, 3, 12, 30)])) ==\
[date(2002, 1, 2)]
def test_compute_up_on_base():
d = datetime.now()
s = symbol('s', 'datetime')
assert compute(s.minute, d) == d.minute
@pytest.mark.parametrize('keys', [['Alice'], ['Bob', 'Alice']])
def test_isin(keys):
expr = t[t.name.isin(keys)]
result = list(compute(expr, data))
expected = [el for el in data if el[0] in keys]
assert result == expected
| {
"repo_name": "nkhuyu/blaze",
"path": "blaze/compute/tests/test_python_compute.py",
"copies": "2",
"size": "26133",
"license": "bsd-3-clause",
"hash": -7193164704418393000,
"line_mean": 30.0737217598,
"line_max": 97,
"alpha_frac": 0.5152871848,
"autogenerated": false,
"ratio": 3.087547258979206,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9597367667442223,
"avg_score": 0.001093355267396718,
"num_lines": 841
} |
from __future__ import absolute_import, division, print_function
import math
import os
import sys
import numpy as np
from numba import unittest_support as unittest
from numba import njit
from numba.compiler import compile_isolated, Flags, types
from numba.runtime import rtsys
from .support import MemoryLeakMixin, TestCase
enable_nrt_flags = Flags()
enable_nrt_flags.set("nrt")
class Dummy(object):
alive = 0
def __init__(self):
type(self).alive += 1
def __del__(self):
type(self).alive -= 1
class TestNrtMemInfo(unittest.TestCase):
"""
    Unittest for core MemInfo functionality
"""
def setUp(self):
# Reset the Dummy class
Dummy.alive = 0
def test_meminfo_refct_1(self):
d = Dummy()
self.assertEqual(Dummy.alive, 1)
addr = 0xdeadcafe # some made up location
mi = rtsys.meminfo_new(addr, d)
self.assertEqual(mi.refcount, 1)
del d
self.assertEqual(Dummy.alive, 1)
mi.acquire()
self.assertEqual(mi.refcount, 2)
self.assertEqual(Dummy.alive, 1)
mi.release()
self.assertEqual(mi.refcount, 1)
del mi
self.assertEqual(Dummy.alive, 0)
def test_meminfo_refct_2(self):
d = Dummy()
self.assertEqual(Dummy.alive, 1)
addr = 0xdeadcafe # some made up location
mi = rtsys.meminfo_new(addr, d)
self.assertEqual(mi.refcount, 1)
del d
self.assertEqual(Dummy.alive, 1)
for ct in range(100):
mi.acquire()
self.assertEqual(mi.refcount, 1 + 100)
self.assertEqual(Dummy.alive, 1)
for _ in range(100):
mi.release()
self.assertEqual(mi.refcount, 1)
del mi
self.assertEqual(Dummy.alive, 0)
@unittest.skipIf(sys.version_info < (3,), "memoryview not supported")
def test_fake_memoryview(self):
d = Dummy()
self.assertEqual(Dummy.alive, 1)
addr = 0xdeadcafe # some made up location
mi = rtsys.meminfo_new(addr, d)
self.assertEqual(mi.refcount, 1)
mview = memoryview(mi)
self.assertEqual(mi.refcount, 1)
self.assertEqual(addr, mi.data)
self.assertFalse(mview.readonly)
self.assertIs(mi, mview.obj)
self.assertTrue(mview.c_contiguous)
self.assertEqual(mview.itemsize, 1)
self.assertEqual(mview.ndim, 1)
del d
del mi
self.assertEqual(Dummy.alive, 1)
del mview
self.assertEqual(Dummy.alive, 0)
@unittest.skipIf(sys.version_info < (3,), "memoryview not supported")
def test_memoryview(self):
from ctypes import c_uint32, c_void_p, POINTER, cast
dtype = np.dtype(np.uint32)
bytesize = dtype.itemsize * 10
mi = rtsys.meminfo_alloc(bytesize, safe=True)
addr = mi.data
c_arr = cast(c_void_p(mi.data), POINTER(c_uint32 * 10))
# Check 0xCB-filling
for i in range(10):
self.assertEqual(c_arr.contents[i], 0xcbcbcbcb)
# Init array with ctypes
for i in range(10):
c_arr.contents[i] = i + 1
mview = memoryview(mi)
self.assertEqual(mview.nbytes, bytesize)
self.assertFalse(mview.readonly)
self.assertIs(mi, mview.obj)
self.assertTrue(mview.c_contiguous)
self.assertEqual(mview.itemsize, 1)
self.assertEqual(mview.ndim, 1)
del mi
arr = np.ndarray(dtype=dtype, shape=mview.nbytes // dtype.itemsize,
buffer=mview)
del mview
# Modify array with NumPy
np.testing.assert_equal(np.arange(arr.size) + 1, arr)
arr += 1
# Check value reflected in ctypes
for i in range(10):
self.assertEqual(c_arr.contents[i], i + 2)
self.assertEqual(arr.ctypes.data, addr)
del arr
# At this point the memory is zero filled
# We can't check this deterministically because the memory could be
# consumed by another thread.
def test_buffer(self):
from ctypes import c_uint32, c_void_p, POINTER, cast
dtype = np.dtype(np.uint32)
bytesize = dtype.itemsize * 10
mi = rtsys.meminfo_alloc(bytesize, safe=True)
self.assertEqual(mi.refcount, 1)
addr = mi.data
c_arr = cast(c_void_p(addr), POINTER(c_uint32 * 10))
# Check 0xCB-filling
for i in range(10):
self.assertEqual(c_arr.contents[i], 0xcbcbcbcb)
# Init array with ctypes
for i in range(10):
c_arr.contents[i] = i + 1
arr = np.ndarray(dtype=dtype, shape=bytesize // dtype.itemsize,
buffer=mi)
self.assertEqual(mi.refcount, 1)
del mi
# Modify array with NumPy
np.testing.assert_equal(np.arange(arr.size) + 1, arr)
arr += 1
# Check value reflected in ctypes
for i in range(10):
self.assertEqual(c_arr.contents[i], i + 2)
self.assertEqual(arr.ctypes.data, addr)
del arr
# At this point the memory is zero filled
# We can't check this deterministically because the memory could be
# consumed by another thread.
@unittest.skipUnless(sys.version_info >= (3, 4),
"need Python 3.4+ for the tracemalloc module")
class TestTracemalloc(unittest.TestCase):
"""
Test NRT-allocated memory can be tracked by tracemalloc.
"""
def measure_memory_diff(self, func):
import tracemalloc
tracemalloc.start()
try:
before = tracemalloc.take_snapshot()
# Keep the result and only delete it after taking a snapshot
res = func()
after = tracemalloc.take_snapshot()
del res
return after.compare_to(before, 'lineno')
finally:
tracemalloc.stop()
def test_snapshot(self):
N = 1000000
dtype = np.int8
@njit
def alloc_nrt_memory():
"""
Allocate and return a large array.
"""
return np.empty(N, dtype)
def keep_memory():
return alloc_nrt_memory()
def release_memory():
alloc_nrt_memory()
alloc_lineno = keep_memory.__code__.co_firstlineno + 1
# Warmup JIT
alloc_nrt_memory()
# The large NRT-allocated array should appear topmost in the diff
diff = self.measure_memory_diff(keep_memory)
stat = diff[0]
# There is a slight overhead, so the allocated size won't exactly be N
self.assertGreaterEqual(stat.size, N)
self.assertLess(stat.size, N * 1.01)
frame = stat.traceback[0]
self.assertEqual(os.path.basename(frame.filename), "test_nrt.py")
self.assertEqual(frame.lineno, alloc_lineno)
# If NRT memory is released before taking a snapshot, it shouldn't
# appear.
diff = self.measure_memory_diff(release_memory)
stat = diff[0]
# Something else appears, but nothing the magnitude of N
self.assertLess(stat.size, N * 0.01)
class TestNRTIssue(MemoryLeakMixin, TestCase):
def test_issue_with_refct_op_pruning(self):
"""
GitHub Issue #1244 https://github.com/numba/numba/issues/1244
"""
@njit
def calculate_2D_vector_mag(vector):
x, y = vector
return math.sqrt(x ** 2 + y ** 2)
@njit
def normalize_2D_vector(vector):
normalized_vector = np.empty(2, dtype=np.float64)
mag = calculate_2D_vector_mag(vector)
x, y = vector
normalized_vector[0] = x / mag
normalized_vector[1] = y / mag
return normalized_vector
@njit
def normalize_vectors(num_vectors, vectors):
normalized_vectors = np.empty((num_vectors, 2), dtype=np.float64)
for i in range(num_vectors):
vector = vectors[i]
normalized_vector = normalize_2D_vector(vector)
normalized_vectors[i, 0] = normalized_vector[0]
normalized_vectors[i, 1] = normalized_vector[1]
return normalized_vectors
num_vectors = 10
test_vectors = np.random.random((num_vectors, 2))
got = normalize_vectors(num_vectors, test_vectors)
expected = normalize_vectors.py_func(num_vectors, test_vectors)
np.testing.assert_almost_equal(expected, got)
def test_incref_after_cast(self):
# Issue #1427: when casting a value before returning it, the
# cast result should be incref'ed, not the original value.
def f():
return 0.0, np.zeros(1, dtype=np.int32)
# Note the return type isn't the same as the tuple type above:
# the first element is a complex rather than a float.
cres = compile_isolated(f, (),
types.Tuple((types.complex128,
types.Array(types.int32, 1, 'C')
))
)
z, arr = cres.entry_point()
self.assertPreciseEqual(z, 0j)
self.assertPreciseEqual(arr, np.zeros(1, dtype=np.int32))
def test_refct_pruning_issue_1511(self):
@njit
def f():
a = np.ones(10, dtype=np.float64)
b = np.ones(10, dtype=np.float64)
return a, b[:]
a, b = f()
np.testing.assert_equal(a, b)
np.testing.assert_equal(a, np.ones(10, dtype=np.float64))
def test_refct_pruning_issue_1526(self):
@njit
def udt(image, x, y):
next_loc = np.where(image == 1)
if len(next_loc[0]) == 0:
y_offset = 1
x_offset = 1
else:
y_offset = next_loc[0][0]
x_offset = next_loc[1][0]
next_loc_x = (x - 1) + x_offset
next_loc_y = (y - 1) + y_offset
return next_loc_x, next_loc_y
a = np.array([[1, 0, 1, 0, 1, 0, 0, 1, 0, 0]])
expect = udt.py_func(a, 1, 6)
got = udt(a, 1, 6)
self.assertEqual(expect, got)
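# --- Added illustration (hypothetical helper, not part of the original tests) ---
# The snapshot-diff pattern used in TestTracemalloc above is plain stdlib
# tracemalloc; the same recipe works for any large allocation. A standalone
# sketch (Python 3.4+), assuming nothing beyond the standard library:
def _demo_tracemalloc_diff():
    import tracemalloc
    tracemalloc.start()
    try:
        before = tracemalloc.take_snapshot()
        payload = bytearray(10 ** 6)   # ~1 MB, should dominate the diff
        after = tracemalloc.take_snapshot()
        del payload
    finally:
        tracemalloc.stop()
    # Largest difference by line number should be the bytearray allocation
    top = after.compare_to(before, 'lineno')[0]
    assert top.size_diff >= 10 ** 6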
if __name__ == '__main__':
unittest.main()
| {
"repo_name": "stefanseefeld/numba",
"path": "numba/tests/test_nrt.py",
"copies": "1",
"size": "10302",
"license": "bsd-2-clause",
"hash": 3339733254993710600,
"line_mean": 29.9369369369,
"line_max": 78,
"alpha_frac": 0.5699864104,
"autogenerated": false,
"ratio": 3.740740740740741,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4810727151140741,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import math
import re
from operator import getitem
from .compatibility import unicode
from .context import _globals
from .core import add, inc # noqa: F401
from .core import (istask, get_dependencies, subs, toposort, flatten,
reverse_dict, ishashable)
def cull(dsk, keys):
""" Return new dask with only the tasks required to calculate keys.
In other words, remove unnecessary tasks from dask.
``keys`` may be a single key or list of keys.
Examples
--------
>>> d = {'x': 1, 'y': (inc, 'x'), 'out': (add, 'x', 10)}
>>> dsk, dependencies = cull(d, 'out') # doctest: +SKIP
>>> dsk # doctest: +SKIP
{'x': 1, 'out': (add, 'x', 10)}
>>> dependencies # doctest: +SKIP
{'x': set(), 'out': set(['x'])}
Returns
-------
dsk: culled dask graph
dependencies: Dict mapping {key: [deps]}. Useful side effect to accelerate
other optimizations, notably fuse.
"""
if not isinstance(keys, (list, set)):
keys = [keys]
out_keys = []
seen = set()
dependencies = dict()
work = list(set(flatten(keys)))
while work:
new_work = []
out_keys += work
deps = [(k, get_dependencies(dsk, k, as_list=True)) # fuse needs lists
for k in work]
dependencies.update(deps)
for _, deplist in deps:
for d in deplist:
if d not in seen:
seen.add(d)
new_work.append(d)
work = new_work
out = {k: dsk[k] for k in out_keys}
return out, dependencies
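# --- Added illustration (hypothetical helper, not part of the original module) ---
# Stripped of the dask-specific helpers, cull is a reachability walk from the
# requested keys; a self-contained sketch for graphs whose tasks are
# (callable, arg, ...) tuples and whose keys are strings:
def _demo_cull_sketch():
    def deps_of(task):
        if not isinstance(task, tuple):
            return []
        return [a for a in task[1:] if isinstance(a, str)]
    graph = {'x': 1, 'y': (abs, 'x'), 'out': (pow, 'x', 10)}
    work, keep = ['out'], set()
    while work:
        k = work.pop()
        if k not in keep:
            keep.add(k)
            work.extend(deps_of(graph[k]))
    # 'y' is unreachable from 'out' and gets dropped
    assert dict((k, graph[k]) for k in keep) == {'x': 1, 'out': (pow, 'x', 10)}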
def default_fused_linear_keys_renamer(keys):
"""Create new keys for fused tasks"""
typ = type(keys[0])
if typ is str or typ is unicode:
names = [key_split(x) for x in keys[:0:-1]]
names.append(keys[0])
return '-'.join(names)
elif (typ is tuple and len(keys[0]) > 0 and
isinstance(keys[0][0], (str, unicode))):
names = [key_split(x) for x in keys[:0:-1]]
names.append(keys[0][0])
return ('-'.join(names),) + keys[0][1:]
else:
return None
def fuse_linear(dsk, keys=None, dependencies=None, rename_keys=True):
""" Return new dask graph with linear sequence of tasks fused together.
If specified, the keys in ``keys`` keyword argument are *not* fused.
Supply ``dependencies`` from output of ``cull`` if available to avoid
recomputing dependencies.
**This function is mostly superseded by ``fuse``**
Parameters
----------
dsk: dict
keys: list
dependencies: dict, optional
{key: [list-of-keys]}. Must be a list to provide count of each key
This optional input often comes from ``cull``
rename_keys: bool or func, optional
Whether to rename fused keys with ``default_fused_linear_keys_renamer``
or not. Renaming fused keys can keep the graph more understandable
and comprehensive, but it comes at the cost of additional processing.
If False, then the top-most key will be used. For advanced usage, a
func is also accepted, ``new_key = rename_keys(fused_key_list)``.
Examples
--------
>>> d = {'a': 1, 'b': (inc, 'a'), 'c': (inc, 'b')}
>>> dsk, dependencies = fuse(d)
>>> dsk # doctest: +SKIP
{'a-b-c': (inc, (inc, 1)), 'c': 'a-b-c'}
>>> dsk, dependencies = fuse(d, rename_keys=False)
>>> dsk # doctest: +SKIP
{'c': (inc, (inc, 1))}
>>> dsk, dependencies = fuse(d, keys=['b'], rename_keys=False)
>>> dsk # doctest: +SKIP
{'b': (inc, 1), 'c': (inc, 'b')}
Returns
-------
dsk: output graph with keys fused
dependencies: dict mapping dependencies after fusion. Useful side effect
to accelerate other downstream optimizations.
"""
if keys is not None and not isinstance(keys, set):
if not isinstance(keys, list):
keys = [keys]
keys = set(flatten(keys))
if dependencies is None:
dependencies = {k: get_dependencies(dsk, k, as_list=True)
for k in dsk}
# locate all members of linear chains
child2parent = {}
unfusible = set()
for parent in dsk:
deps = dependencies[parent]
has_many_children = len(deps) > 1
for child in deps:
if keys is not None and child in keys:
unfusible.add(child)
elif child in child2parent:
del child2parent[child]
unfusible.add(child)
elif has_many_children:
unfusible.add(child)
elif child not in unfusible:
child2parent[child] = parent
# construct the chains from ancestor to descendant
chains = []
parent2child = dict(map(reversed, child2parent.items()))
while child2parent:
child, parent = child2parent.popitem()
chain = [child, parent]
while parent in child2parent:
parent = child2parent.pop(parent)
del parent2child[parent]
chain.append(parent)
chain.reverse()
while child in parent2child:
child = parent2child.pop(child)
del child2parent[child]
chain.append(child)
chains.append(chain)
dependencies = {k: set(v) for k, v in dependencies.items()}
if rename_keys is True:
key_renamer = default_fused_linear_keys_renamer
elif rename_keys is False:
key_renamer = None
else:
key_renamer = rename_keys
# create a new dask with fused chains
rv = {}
fused = set()
aliases = set()
is_renamed = False
for chain in chains:
if key_renamer is not None:
new_key = key_renamer(chain)
is_renamed = (new_key is not None and new_key not in dsk and
new_key not in rv)
child = chain.pop()
val = dsk[child]
while chain:
parent = chain.pop()
dependencies[parent].update(dependencies.pop(child))
dependencies[parent].remove(child)
val = subs(dsk[parent], child, val)
fused.add(child)
child = parent
fused.add(child)
if is_renamed:
rv[new_key] = val
rv[child] = new_key
dependencies[new_key] = dependencies[child]
dependencies[child] = {new_key}
aliases.add(child)
else:
rv[child] = val
for key, val in dsk.items():
if key not in fused:
rv[key] = val
if aliases:
for key, deps in dependencies.items():
for old_key in deps & aliases:
new_key = rv[old_key]
deps.remove(old_key)
deps.add(new_key)
rv[key] = subs(rv[key], old_key, new_key)
if keys is not None:
for key in aliases - keys:
del rv[key]
del dependencies[key]
return rv, dependencies
def _flat_set(x):
if x is None:
return set()
elif isinstance(x, set):
return x
elif not isinstance(x, (list, set)):
x = [x]
return set(x)
def inline(dsk, keys=None, inline_constants=True, dependencies=None):
""" Return new dask with the given keys inlined with their values.
Inlines all constants if ``inline_constants`` keyword is True. Note that
    the constant keys will remain in the graph; to remove them, follow
``inline`` with ``cull``.
Examples
--------
>>> d = {'x': 1, 'y': (inc, 'x'), 'z': (add, 'x', 'y')}
>>> inline(d) # doctest: +SKIP
{'x': 1, 'y': (inc, 1), 'z': (add, 1, 'y')}
>>> inline(d, keys='y') # doctest: +SKIP
{'x': 1, 'y': (inc, 1), 'z': (add, 1, (inc, 1))}
>>> inline(d, keys='y', inline_constants=False) # doctest: +SKIP
{'x': 1, 'y': (inc, 1), 'z': (add, 'x', (inc, 'x'))}
"""
if dependencies and isinstance(next(iter(dependencies.values())), list):
dependencies = {k: set(v) for k, v in dependencies.items()}
keys = _flat_set(keys)
if dependencies is None:
dependencies = {k: get_dependencies(dsk, k)
for k in dsk}
if inline_constants:
keys.update(k for k, v in dsk.items() if
(ishashable(v) and v in dsk) or
(not dependencies[k] and not istask(v)))
# Keys may depend on other keys, so determine replace order with toposort.
# The values stored in `keysubs` do not include other keys.
replaceorder = toposort(dict((k, dsk[k]) for k in keys if k in dsk),
dependencies=dependencies)
keysubs = {}
for key in replaceorder:
val = dsk[key]
for dep in keys & dependencies[key]:
if dep in keysubs:
replace = keysubs[dep]
else:
replace = dsk[dep]
val = subs(val, dep, replace)
keysubs[key] = val
# Make new dask with substitutions
dsk2 = keysubs.copy()
for key, val in dsk.items():
if key not in dsk2:
for item in keys & dependencies[key]:
val = subs(val, item, keysubs[item])
dsk2[key] = val
return dsk2
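# --- Added illustration (hypothetical helper, not part of the original module) ---
# The core of inline() is substituting a key's task (or constant) into every
# task that references it; a self-contained substitution over tuple-tasks:
def _demo_inline_sketch():
    def subs_sketch(task, key, val):
        if not isinstance(task, tuple):
            return val if task == key else task
        return (task[0],) + tuple(subs_sketch(a, key, val) for a in task[1:])
    graph = {'x': 1, 'y': (abs, 'x'), 'z': (pow, 'x', 'y')}
    inlined = dict((k, subs_sketch(v, 'x', graph['x'])) for k, v in graph.items())
    assert inlined == {'x': 1, 'y': (abs, 1), 'z': (pow, 1, 'y')}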
def inline_functions(dsk, output, fast_functions=None, inline_constants=False,
dependencies=None):
""" Inline cheap functions into larger operations
Examples
--------
>>> dsk = {'out': (add, 'i', 'd'), # doctest: +SKIP
... 'i': (inc, 'x'),
... 'd': (double, 'y'),
... 'x': 1, 'y': 1}
>>> inline_functions(dsk, [], [inc]) # doctest: +SKIP
{'out': (add, (inc, 'x'), 'd'),
'd': (double, 'y'),
'x': 1, 'y': 1}
Protect output keys. In the example below ``i`` is not inlined because it
is marked as an output key.
>>> inline_functions(dsk, ['i', 'out'], [inc, double]) # doctest: +SKIP
{'out': (add, 'i', (double, 'y')),
'i': (inc, 'x'),
'x': 1, 'y': 1}
"""
if not fast_functions:
return dsk
output = set(output)
fast_functions = set(fast_functions)
if dependencies is None:
dependencies = {k: get_dependencies(dsk, k)
for k in dsk}
dependents = reverse_dict(dependencies)
keys = [k for k, v in dsk.items()
if istask(v) and functions_of(v).issubset(fast_functions) and
dependents[k] and k not in output
]
if keys:
dsk = inline(dsk, keys, inline_constants=inline_constants,
dependencies=dependencies)
for k in keys:
del dsk[k]
return dsk
def unwrap_partial(func):
while hasattr(func, 'func'):
func = func.func
return func
def functions_of(task):
""" Set of functions contained within nested task
Examples
--------
>>> task = (add, (mul, 1, 2), (inc, 3)) # doctest: +SKIP
>>> functions_of(task) # doctest: +SKIP
set([add, mul, inc])
"""
funcs = set()
work = [task]
sequence_types = {list, tuple}
while work:
new_work = []
for task in work:
if type(task) in sequence_types:
if istask(task):
funcs.add(unwrap_partial(task[0]))
new_work += task[1:]
else:
new_work += task
work = new_work
return funcs
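# --- Added illustration (hypothetical helper, not part of the original module) ---
# A concrete spot check of functions_of above, using builtins as the task
# heads (relies on the module-level functions_of/istask):
def _demo_functions_of():
    task = (max, (min, 1, 2), (abs, 3))
    assert functions_of(task) == set([max, min, abs])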
def fuse_selections(dsk, head1, head2, merge):
"""Fuse selections with lower operation.
Handles graphs of the form:
``{key1: (head1, key2, ...), key2: (head2, ...)}``
Parameters
----------
dsk : dict
dask graph
head1 : function
The first element of task1
head2 : function
The first element of task2
merge : function
Takes ``task1`` and ``task2`` and returns a merged task to
replace ``task1``.
Examples
--------
>>> def load(store, partition, columns):
... pass
>>> dsk = {'x': (load, 'store', 'part', ['a', 'b']),
... 'y': (getitem, 'x', 'a')}
>>> merge = lambda t1, t2: (load, t2[1], t2[2], t1[2])
>>> dsk2 = fuse_selections(dsk, getitem, load, merge)
>>> cull(dsk2, 'y')[0]
{'y': (<function load at ...>, 'store', 'part', 'a')}
"""
dsk2 = dict()
for k, v in dsk.items():
try:
if (istask(v) and v[0] == head1 and v[1] in dsk and
istask(dsk[v[1]]) and dsk[v[1]][0] == head2):
dsk2[k] = merge(v, dsk[v[1]])
else:
dsk2[k] = v
except TypeError:
dsk2[k] = v
return dsk2
def fuse_getitem(dsk, func, place):
""" Fuse getitem with lower operation
Parameters
----------
dsk: dict
dask graph
func: function
A function in a task to merge
place: int
Location in task to insert the getitem key
Examples
--------
>>> def load(store, partition, columns):
... pass
>>> dsk = {'x': (load, 'store', 'part', ['a', 'b']),
... 'y': (getitem, 'x', 'a')}
>>> dsk2 = fuse_getitem(dsk, load, 3) # columns in arg place 3
>>> cull(dsk2, 'y')[0]
{'y': (<function load at ...>, 'store', 'part', 'a')}
"""
return fuse_selections(dsk, getitem, func,
lambda a, b: tuple(b[:place]) + (a[2], ) + tuple(b[place + 1:]))
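# --- Added illustration (hypothetical helper, not part of the original module) ---
# fuse_selections/fuse_getitem only pattern-match a two-task chain and let
# `merge` build the replacement; the docstrings' load/getitem example, traced
# by hand with a dummy `load`:
def _demo_fuse_getitem_pattern():
    from operator import getitem
    def load(store, partition, columns):
        pass
    dsk = {'x': (load, 'store', 'part', ['a', 'b']),
           'y': (getitem, 'x', 'a')}
    v = dsk['y']
    # the pattern: a getitem task whose first argument is itself a load task
    assert v[0] is getitem and dsk[v[1]][0] is load
    merged = tuple(dsk[v[1]][:3]) + (v[2],)   # columns argument sits at place 3
    assert merged == (load, 'store', 'part', 'a')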
def default_fused_keys_renamer(keys):
"""Create new keys for ``fuse`` tasks"""
it = reversed(keys)
first_key = next(it)
typ = type(first_key)
if typ is str or typ is unicode:
first_name = key_split(first_key)
names = {key_split(k) for k in it}
names.discard(first_name)
names = sorted(names)
names.append(first_key)
return '-'.join(names)
elif (typ is tuple and len(first_key) > 0 and
isinstance(first_key[0], (str, unicode))):
first_name = key_split(first_key)
names = {key_split(k) for k in it}
names.discard(first_name)
names = sorted(names)
names.append(first_key[0])
return ('-'.join(names),) + first_key[1:]
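# --- Added illustration (hypothetical helper, not part of the original module) ---
# default_fused_keys_renamer strips numbered suffixes from the child keys,
# sorts the resulting names, and keeps the root key (the last entry) intact:
def _demo_fused_keys_renamer():
    assert default_fused_keys_renamer(['add-1', 'sum-2', 'mul-3']) == 'add-sum-mul-3'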
def fuse(dsk, keys=None, dependencies=None, ave_width=None, max_width=None,
max_height=None, max_depth_new_edges=None, rename_keys=None):
""" Fuse tasks that form reductions; more advanced than ``fuse_linear``
This trades parallelism opportunities for faster scheduling by making tasks
less granular. It can replace ``fuse_linear`` in optimization passes.
This optimization applies to all reductions--tasks that have at most one
dependent--so it may be viewed as fusing "multiple input, single output"
groups of tasks into a single task. There are many parameters to fine
tune the behavior, which are described below. ``ave_width`` is the
natural parameter with which to compare parallelism to granularity, so
it should always be specified. Reasonable values for other parameters
    will be determined using ``ave_width`` if necessary.
Parameters
----------
dsk: dict
dask graph
keys: list or set, optional
Keys that must remain in the returned dask graph
dependencies: dict, optional
{key: [list-of-keys]}. Must be a list to provide count of each key
This optional input often comes from ``cull``
    ave_width: float (default 1)
Upper limit for ``width = num_nodes / height``, a good measure of
parallelizability
max_width: int
Don't fuse if total width is greater than this
max_height: int
Don't fuse more than this many levels
max_depth_new_edges: int
Don't fuse if new dependencies are added after this many levels
rename_keys: bool or func, optional
Whether to rename the fused keys with ``default_fused_keys_renamer``
or not. Renaming fused keys can keep the graph more understandable
and comprehensive, but it comes at the cost of additional processing.
If False, then the top-most key will be used. For advanced usage, a
function to create the new name is also accepted.
Returns
-------
dsk: output graph with keys fused
dependencies: dict mapping dependencies after fusion. Useful side effect
to accelerate other downstream optimizations.
"""
if keys is not None and not isinstance(keys, set):
if not isinstance(keys, list):
keys = [keys]
keys = set(flatten(keys))
# Assign reasonable, not too restrictive defaults
if ave_width is None:
if _globals.get('fuse_ave_width') is None:
ave_width = 1
else:
ave_width = _globals['fuse_ave_width']
if max_height is None:
if _globals.get('fuse_max_height') is None:
max_height = len(dsk)
else:
max_height = _globals['fuse_max_height']
max_depth_new_edges = (
max_depth_new_edges or
_globals.get('fuse_max_depth_new_edges') or
ave_width + 1.5
)
max_width = (
max_width or
_globals.get('fuse_max_width') or
1.5 + ave_width * math.log(ave_width + 1)
)
if not ave_width or not max_height:
return dsk, dependencies
if rename_keys is None:
rename_keys = _globals.get('fuse_rename_keys', True)
if rename_keys is True:
key_renamer = default_fused_keys_renamer
elif rename_keys is False:
key_renamer = None
else:
key_renamer = rename_keys
if dependencies is None:
deps = {k: get_dependencies(dsk, k, as_list=True) for k in dsk}
else:
deps = dict(dependencies)
rdeps = {}
for k, vals in deps.items():
for v in vals:
if v not in rdeps:
rdeps[v] = [k]
else:
rdeps[v].append(k)
deps[k] = set(vals)
reducible = {k for k, vals in rdeps.items() if len(vals) == 1}
if keys:
reducible -= keys
if not reducible:
return dsk, deps
rv = dsk.copy()
fused_trees = {}
# These are the stacks we use to store data as we traverse the graph
info_stack = []
children_stack = []
# For speed
deps_pop = deps.pop
reducible_add = reducible.add
reducible_pop = reducible.pop
reducible_remove = reducible.remove
fused_trees_pop = fused_trees.pop
info_stack_append = info_stack.append
info_stack_pop = info_stack.pop
children_stack_append = children_stack.append
children_stack_extend = children_stack.extend
children_stack_pop = children_stack.pop
while reducible:
parent = reducible_pop()
reducible_add(parent)
while parent in reducible:
# Go to the top
parent = rdeps[parent][0]
children_stack_append(parent)
children_stack_extend(reducible & deps[parent])
while True:
child = children_stack[-1]
if child != parent:
children = reducible & deps[child]
while children:
# Depth-first search
children_stack_extend(children)
parent = child
child = children_stack[-1]
children = reducible & deps[child]
else:
children_stack_pop()
# This is a leaf node in the reduction region
# key, task, fused_keys, height, width, number of nodes, fudge, set of edges
info_stack_append((child, rv[child], None if key_renamer is None else [child],
1, 1, 1, 0, deps[child] - reducible))
else:
children_stack_pop()
# Calculate metrics and fuse as appropriate
deps_parent = deps[parent]
edges = deps_parent - reducible
children = deps_parent - edges
num_children = len(children)
if num_children == 1:
(child_key, child_task, child_keys, height, width, num_nodes, fudge,
children_edges) = info_stack_pop()
num_children_edges = len(children_edges)
if fudge > num_children_edges - 1 >= 0:
fudge = num_children_edges - 1
edges |= children_edges
no_new_edges = len(edges) == num_children_edges
if not no_new_edges:
fudge += 1
if (
(num_nodes + fudge) / height <= ave_width and
# Sanity check; don't go too deep if new levels introduce new edge dependencies
(no_new_edges or height < max_depth_new_edges)
):
# Perform substitutions as we go
val = subs(dsk[parent], child_key, child_task)
deps_parent.remove(child_key)
deps_parent |= deps_pop(child_key)
del rv[child_key]
reducible_remove(child_key)
if key_renamer is not None:
child_keys.append(parent)
fused_trees[parent] = child_keys
fused_trees_pop(child_key, None)
if children_stack:
if no_new_edges:
# Linear fuse
info_stack_append((parent, val, child_keys, height, width, num_nodes, fudge, edges))
else:
info_stack_append((parent, val, child_keys, height + 1, width, num_nodes + 1, fudge,
edges))
else:
rv[parent] = val
break
else:
rv[child_key] = child_task
reducible_remove(child_key)
if children_stack:
# Allow the parent to be fused, but only under strict circumstances.
# Ensure that linear chains may still be fused.
if fudge > int(ave_width - 1):
fudge = int(ave_width - 1)
# This task *implicitly* depends on `edges`
info_stack_append((parent, rv[parent], None if key_renamer is None else [parent],
1, width, 1, fudge, edges))
else:
break
else:
child_keys = []
height = 1
width = 0
num_single_nodes = 0
num_nodes = 0
fudge = 0
children_edges = set()
max_num_edges = 0
children_info = info_stack[-num_children:]
del info_stack[-num_children:]
for cur_key, cur_task, cur_keys, cur_height, cur_width, cur_num_nodes, cur_fudge, \
cur_edges in children_info:
if cur_height == 1:
num_single_nodes += 1
elif cur_height > height:
height = cur_height
width += cur_width
num_nodes += cur_num_nodes
fudge += cur_fudge
if len(cur_edges) > max_num_edges:
max_num_edges = len(cur_edges)
children_edges |= cur_edges
# Fudge factor to account for possible parallelism with the boundaries
num_children_edges = len(children_edges)
fudge += min(num_children - 1, max(0, num_children_edges - max_num_edges))
if fudge > num_children_edges - 1 >= 0:
fudge = num_children_edges - 1
edges |= children_edges
no_new_edges = len(edges) == num_children_edges
if not no_new_edges:
fudge += 1
if (
(num_nodes + fudge) / height <= ave_width and
num_single_nodes <= ave_width and
width <= max_width and
height <= max_height and
# Sanity check; don't go too deep if new levels introduce new edge dependencies
(no_new_edges or height < max_depth_new_edges)
):
# Perform substitutions as we go
val = dsk[parent]
children_deps = set()
for child_info in children_info:
cur_child = child_info[0]
val = subs(val, cur_child, child_info[1])
del rv[cur_child]
children_deps |= deps_pop(cur_child)
reducible_remove(cur_child)
if key_renamer is not None:
fused_trees_pop(cur_child, None)
child_keys.extend(child_info[2])
deps_parent -= children
deps_parent |= children_deps
if key_renamer is not None:
child_keys.append(parent)
fused_trees[parent] = child_keys
if children_stack:
info_stack_append((parent, val, child_keys, height + 1, width, num_nodes + 1, fudge, edges))
else:
rv[parent] = val
break
else:
for child_info in children_info:
rv[child_info[0]] = child_info[1]
reducible_remove(child_info[0])
if children_stack:
# Allow the parent to be fused, but only under strict circumstances.
# Ensure that linear chains may still be fused.
if width > max_width:
width = max_width
if fudge > int(ave_width - 1):
fudge = int(ave_width - 1)
# key, task, height, width, number of nodes, fudge, set of edges
# This task *implicitly* depends on `edges`
info_stack_append((parent, rv[parent], None if key_renamer is None else [parent],
1, width, 1, fudge, edges))
else:
break
# Traverse upwards
parent = rdeps[parent][0]
if key_renamer is not None:
for root_key, fused_keys in fused_trees.items():
alias = key_renamer(fused_keys)
if alias is not None and alias not in rv:
rv[alias] = rv[root_key]
rv[root_key] = alias
deps[alias] = deps[root_key]
deps[root_key] = {alias}
return rv, deps
# Defining `key_split` (used by key renamers in `fuse`) in utils.py
# results in messy circular imports, so define it here instead.
hex_pattern = re.compile('[a-f]+')
def key_split(s):
"""
>>> key_split('x')
u'x'
>>> key_split('x-1')
u'x'
>>> key_split('x-1-2-3')
u'x'
>>> key_split(('x-2', 1))
'x'
>>> key_split("('x-2', 1)")
u'x'
>>> key_split('hello-world-1')
u'hello-world'
>>> key_split(b'hello-world-1')
u'hello-world'
>>> key_split('ae05086432ca935f6eba409a8ecd4896')
'data'
>>> key_split('<module.submodule.myclass object at 0xdaf372')
u'myclass'
>>> key_split(None)
'Other'
>>> key_split('x-abcdefab') # ignores hex
u'x'
"""
if type(s) is bytes:
s = s.decode()
if type(s) is tuple:
s = s[0]
try:
words = s.split('-')
if not words[0][0].isalpha():
result = words[0].lstrip("'(\"")
else:
result = words[0]
for word in words[1:]:
if word.isalpha() and not (len(word) == 8 and
hex_pattern.match(word) is not None):
result += '-' + word
else:
break
if len(result) == 32 and re.match(r'[a-f0-9]{32}', result):
return 'data'
else:
if result[0] == '<':
result = result.strip('<>').split()[0].split('.')[-1]
return result
except Exception:
return 'Other'
def dont_optimize(dsk, keys, **kwargs):
return dsk
| {
"repo_name": "mraspaud/dask",
"path": "dask/optimize.py",
"copies": "2",
"size": "29125",
"license": "bsd-3-clause",
"hash": -82361891368243870,
"line_mean": 34.7800982801,
"line_max": 120,
"alpha_frac": 0.5134077253,
"autogenerated": false,
"ratio": 3.9555887545837294,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0002061287866198547,
"num_lines": 814
} |
from __future__ import absolute_import, division, print_function
import math
import pygal
from pygal.style import DefaultStyle
try:
import pygaljs
except ImportError:
opts = {}
else:
opts = {"js": [pygaljs.uri("2.0.x", "pygal-tooltips.js")]}
opts["css"] = [
"file://style.css",
"file://graph.css",
"""inline:
.axis.x text {
text-anchor: middle !important;
}
.tooltip .value {
font-size: 1em !important;
}
"""
]
def log_ceil(x):
x = float(x)
exponent = math.floor(math.log10(x))
exp_mult = math.pow(10, exponent)
mantissa = x / exp_mult
return math.ceil(mantissa) * exp_mult
def history_range(history):
max_ = max(v for serie in history.values() for v in serie)
if max_ > 0:
return (0, log_ceil(max_))
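# --- Added illustration (hypothetical helper, not part of the original module) ---
# log_ceil rounds a positive value up by ceiling its mantissa at the value's
# own power of ten, which keeps the plot's y-axis upper bound tidy. Spot
# checks against the module-level log_ceil (tolerance because of floating point):
def _demo_log_ceil():
    for value, expected in [(7.2, 8.0), (0.034, 0.04), (1234, 2000.0)]:
        assert abs(log_ceil(value) - expected) < 1e-9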
def make_plot(trial_names, history, history2, expr, expr2):
style = DefaultStyle(colors=[
'#ED6C1D', # 3
'#EDC51E', # 4
'#BCED1E', # 5
'#63ED1F', # 6
'#1FED34', # 7
'#ED1D27', # 2
][:len(history)] + [
'#A71DED', # -3
'#4F1EED', # -4
'#1E45ED', # -5
'#1F9EED', # -6
'#1FEDE4', # -7
'#ED1DDA', # -2
][:len(history2)]
)
plot = pygal.Line(
title="Speed in seconds",
x_title="Trial",
x_labels=trial_names,
x_label_rotation=15,
include_x_axis=True,
human_readable=True,
range=history_range(history),
secondary_range=history_range(history2),
style=style,
stroke_style={'width': 2, 'dasharray': '20, 4'},
**opts
)
for mode in sorted(history):
serie = [{'value': value, 'label': expr}
for value in history[mode]]
plot.add(mode, serie, stroke_style={'dasharray': 'none'})
for mode in sorted(history2):
serie = [{'value': value, 'label': expr2}
for value in history2[mode]]
plot.add(mode, serie, secondary=True)
return plot
| {
"repo_name": "eisensheng/pytest-catchlog",
"path": "tests/perf/plot.py",
"copies": "1",
"size": "2102",
"license": "mit",
"hash": -7739433362118053000,
"line_mean": 23.7294117647,
"line_max": 65,
"alpha_frac": 0.5142721218,
"autogenerated": false,
"ratio": 3.2488408037094283,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.42631129255094286,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import matplotlib
matplotlib.use("Agg")
import os
import pylab
import numpy as np
from keras import backend as K
from keras.callbacks import Callback, EarlyStopping, ModelCheckpoint
from keras.layers import Dense, Dropout, Input, GlobalAveragePooling2D
from keras.models import Model
from keras.optimizers import Nadam
from keras.preprocessing.image import ImageDataGenerator
from keras.utils.visualize_util import plot
from data_preprocessing import PROJECT_FOLDER_PATH
from data_preprocessing import PROCESSED_DATASET_FOLDER_PATH as DATASET_FOLDER_PATH
from data_preprocessing import PROCESSED_IMAGE_HEIGHT as IMAGE_HEIGHT
from data_preprocessing import PROCESSED_IMAGE_WIDTH as IMAGE_WIDTH
# Choose ResNet50 or InceptionV3 or VGG16
MODEL_NAME = "ResNet50" # "ResNet50" or "InceptionV3" or "VGG16"
if MODEL_NAME == "ResNet50":
from keras.applications.resnet50 import preprocess_input as PREPROCESS_INPUT
from keras.applications.resnet50 import ResNet50 as INIT_FUNC
BOTTLENECK_LAYER_NAME = "activation_40"
DROPOUT_RATIO = 0.5
LEARNING_RATE = 0.00001
elif MODEL_NAME == "InceptionV3":
from keras.applications.inception_v3 import preprocess_input as PREPROCESS_INPUT
from keras.applications.inception_v3 import InceptionV3 as INIT_FUNC
BOTTLENECK_LAYER_NAME = "mixed8"
DROPOUT_RATIO = 0.5
LEARNING_RATE = 0.00001
elif MODEL_NAME == "VGG16":
from keras.applications.vgg16 import preprocess_input as PREPROCESS_INPUT
from keras.applications.vgg16 import VGG16 as INIT_FUNC
BOTTLENECK_LAYER_NAME = "block4_pool"
DROPOUT_RATIO = 0.5
LEARNING_RATE = 0.00005
else:
assert False
# Dataset
TRAIN_FOLDER_PATH = os.path.join(DATASET_FOLDER_PATH, "additional")
# Workspace
ACTUAL_TRAIN_FOLDER_PATH = os.path.join(DATASET_FOLDER_PATH, "additional")
ACTUAL_VALID_FOLDER_PATH = os.path.join(DATASET_FOLDER_PATH, "train")
# Output
OUTPUT_FOLDER_PATH = os.path.join(PROJECT_FOLDER_PATH, "phase_1")
OPTIMAL_WEIGHTS_FOLDER_PATH = os.path.join(OUTPUT_FOLDER_PATH, "optimal weights")
OPTIMAL_WEIGHTS_FILE_PATH = os.path.join(OPTIMAL_WEIGHTS_FOLDER_PATH, "{}.h5".format(MODEL_NAME))
# Training procedure
MAXIMUM_EPOCH_NUM = 1000
PATIENCE = 4
BATCH_SIZE = 32
SEED = 0
def init_model(image_height, image_width, unique_label_num, init_func=INIT_FUNC, bottleneck_layer_name=BOTTLENECK_LAYER_NAME, dropout_ratio=DROPOUT_RATIO, learning_rate=LEARNING_RATE):
def set_model_trainable_properties(model, trainable, bottleneck_layer_name):
for layer in model.layers:
layer.trainable = trainable
if layer.name == bottleneck_layer_name:
break
def get_feature_extractor(input_shape):
feature_extractor = init_func(include_top=False, weights="imagenet", input_shape=input_shape)
set_model_trainable_properties(model=feature_extractor, trainable=False, bottleneck_layer_name=bottleneck_layer_name)
return feature_extractor
def get_dense_classifier(input_shape, unique_label_num):
input_tensor = Input(shape=input_shape)
output_tensor = GlobalAveragePooling2D()(input_tensor)
output_tensor = Dropout(dropout_ratio)(output_tensor)
output_tensor = Dense(unique_label_num, activation="softmax")(output_tensor)
model = Model(input_tensor, output_tensor)
return model
# Initiate the input tensor
if K.image_dim_ordering() == "tf":
input_tensor = Input(shape=(image_height, image_width, 3))
else:
input_tensor = Input(shape=(3, image_height, image_width))
# Define the feature extractor
feature_extractor = get_feature_extractor(input_shape=K.int_shape(input_tensor)[1:])
output_tensor = feature_extractor(input_tensor)
# Define the dense classifier
dense_classifier = get_dense_classifier(input_shape=feature_extractor.output_shape[1:], unique_label_num=unique_label_num)
output_tensor = dense_classifier(output_tensor)
# Define the overall model
model = Model(input_tensor, output_tensor)
model.compile(optimizer=Nadam(lr=learning_rate), loss="categorical_crossentropy", metrics=["accuracy"])
model.summary()
# Plot the model structures
plot(feature_extractor, to_file=os.path.join(OPTIMAL_WEIGHTS_FOLDER_PATH, "{}_feature_extractor.png".format(MODEL_NAME)), show_shapes=True, show_layer_names=True)
plot(dense_classifier, to_file=os.path.join(OPTIMAL_WEIGHTS_FOLDER_PATH, "{}_dense_classifier.png".format(MODEL_NAME)), show_shapes=True, show_layer_names=True)
plot(model, to_file=os.path.join(OPTIMAL_WEIGHTS_FOLDER_PATH, "{}_model.png".format(MODEL_NAME)), show_shapes=True, show_layer_names=True)
return model
def load_dataset(folder_path, target_size=(IMAGE_HEIGHT, IMAGE_WIDTH), classes=None, class_mode=None, batch_size=BATCH_SIZE, shuffle=True, seed=None, preprocess_input=PREPROCESS_INPUT):
# Get the generator of the dataset
data_generator_object = ImageDataGenerator(
rotation_range=10,
width_shift_range=0.05,
height_shift_range=0.05,
shear_range=0.05,
zoom_range=0.05,
horizontal_flip=True,
vertical_flip=True,
preprocessing_function=lambda sample: preprocess_input(np.array([sample]))[0])
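# The lambda above wraps each sample in a length-1 batch (preprocess_input operates
# on batched arrays) and unwraps the single preprocessed sample again.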
data_generator = data_generator_object.flow_from_directory(
directory=folder_path,
target_size=target_size,
color_mode="rgb",
classes=classes,
class_mode=class_mode,
batch_size=batch_size,
shuffle=shuffle,
seed=seed)
return data_generator
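# Illustrative note (not in the original script): with class_mode="categorical" and
# TensorFlow image ordering, each iteration of the returned generator yields a tuple
# (images, labels) with shapes (batch_size, IMAGE_HEIGHT, IMAGE_WIDTH, 3) and
# (batch_size, unique_label_num).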
class InspectLossAccuracy(Callback):
def __init__(self):
super(InspectLossAccuracy, self).__init__()
self.train_loss_list = []
self.valid_loss_list = []
self.train_acc_list = []
self.valid_acc_list = []
def on_epoch_end(self, epoch, logs=None):
# Loss
train_loss = logs.get("loss")
valid_loss = logs.get("val_loss")
self.train_loss_list.append(train_loss)
self.valid_loss_list.append(valid_loss)
epoch_index_array = np.arange(len(self.train_loss_list)) + 1
pylab.figure()
pylab.plot(epoch_index_array, self.train_loss_list, "yellowgreen", label="train_loss")
pylab.plot(epoch_index_array, self.valid_loss_list, "lightskyblue", label="valid_loss")
pylab.grid()
pylab.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=2, ncol=2, mode="expand", borderaxespad=0.)
pylab.savefig(os.path.join(OUTPUT_FOLDER_PATH, "{}_loss_curve.png".format(MODEL_NAME)))
pylab.close()
# Accuracy
train_acc = logs.get("acc")
valid_acc = logs.get("val_acc")
self.train_acc_list.append(train_acc)
self.valid_acc_list.append(valid_acc)
epoch_index_array = np.arange(len(self.train_acc_list)) + 1
pylab.figure()
pylab.plot(epoch_index_array, self.train_acc_list, "yellowgreen", label="train_acc")
pylab.plot(epoch_index_array, self.valid_acc_list, "lightskyblue", label="valid_acc")
pylab.grid()
pylab.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=2, ncol=2, mode="expand", borderaxespad=0.)
pylab.savefig(os.path.join(OUTPUT_FOLDER_PATH, "{}_accuracy_curve.png".format(MODEL_NAME)))
pylab.close()
def run():
print("Creating folders ...")
os.makedirs(OPTIMAL_WEIGHTS_FOLDER_PATH, exist_ok=True)
print("Getting the labels ...")
unique_label_list = sorted([folder_name for folder_name in os.listdir(TRAIN_FOLDER_PATH) if os.path.isdir(os.path.join(TRAIN_FOLDER_PATH, folder_name))])
print("Initializing model ...")
model = init_model(image_height=IMAGE_HEIGHT, image_width=IMAGE_WIDTH, unique_label_num=len(unique_label_list))
print("Performing the training procedure ...")
train_generator = load_dataset(ACTUAL_TRAIN_FOLDER_PATH, classes=unique_label_list, class_mode="categorical", shuffle=True, seed=SEED)
valid_generator = load_dataset(ACTUAL_VALID_FOLDER_PATH, classes=unique_label_list, class_mode="categorical", shuffle=True, seed=SEED)
train_sample_num = len(train_generator.filenames)
valid_sample_num = len(valid_generator.filenames)
earlystopping_callback = EarlyStopping(monitor="val_loss", patience=PATIENCE)
modelcheckpoint_callback = ModelCheckpoint(OPTIMAL_WEIGHTS_FILE_PATH, monitor="val_loss", save_best_only=True, save_weights_only=True)
inspectlossaccuracy_callback = InspectLossAccuracy()
model.fit_generator(generator=train_generator,
samples_per_epoch=train_sample_num,
validation_data=valid_generator,
nb_val_samples=valid_sample_num,
callbacks=[earlystopping_callback, modelcheckpoint_callback, inspectlossaccuracy_callback],
nb_epoch=MAXIMUM_EPOCH_NUM, verbose=2)
print("All done!")
if __name__ == "__main__":
run()
| {
"repo_name": "nixingyang/Kaggle-Face-Verification",
"path": "Cervical Cancer Screening/solution_classification_with_additional.py",
"copies": "1",
"size": "8980",
"license": "mit",
"hash": -781850206346939300,
"line_mean": 43.9,
"line_max": 185,
"alpha_frac": 0.6987750557,
"autogenerated": false,
"ratio": 3.4366628396479144,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4635437895347914,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import matplotlib.pyplot as plt
from ..core.client import Client
from ..core import Data
from .layer_artist import LayerArtistContainer
__all__ = ['VizClient', 'GenericMplClient']
class VizClient(Client):
"""
The VizClient class provides an interface (and minimal
implementation) for a generic client that creates
visualizations. The goal of VizClient is to provide a reusable way
to organize client plotting code.
Clients which extend VizClient should override the following methods
to perform specific visualization tasks
* _update_axis_labels
* _update_data_plot
* _update_subset_single
* _redraw
* init_layer
VizClient provides a public refresh() method that calls all of
these methods.
Attributes
----------
options: A dictionary of global plot options, to be handled by
subclasses.
"""
def __init__(self, data, options=None):
Client.__init__(self, data)
if not options:
self.options = {}
else:
self.options = options
def _add_data(self, message):
pass
def _remove_data(self, message):
pass
def _update_data(self, message):
"""
Method to handle messages sent by the dataset. Refreshes the display.
"""
self._update_data_plot()
self.refresh()
def _add_subset(self, message):
"""
Method to handle messages sent when subsets are created.
"""
s = message.subset
self.init_layer(s)
self._redraw()
def _update_subset(self, message):
"""
Method to handle messages sent when subsets are modified.
The plot properties of the modified subset are refreshed.
"""
s = message.subset
self._update_subset_single(s, redraw=True)
def refresh(self):
"""
Update and redraw all plot information.
"""
self._update_data_plot()
self._update_subset_plots()
self._update_axis_labels()
self._redraw()
def _redraw(self):
"""
Redraw, but do not update, plot information
"""
raise NotImplementedError("VizClient cannot draw!")
def _update_axis_labels(self):
"""
Sync the axis labels to reflect which components are
currently being plotted
"""
raise NotImplementedError("VizClient cannot draw!")
def _update_data_plot(self):
"""
Sync the location of the scatter points to
reflect what components are being plotted
"""
raise NotImplementedError("VizClient cannot draw!")
def _update_subset_plots(self, redraw=False):
"""
Sync the location and visual properties
of each point in each subset
"""
junk = [self._update_subset_single(s) for d in self.data
for s in d.subsets]
if redraw:
self._redraw()
def _update_subset_single(self, s, redraw=False):
"""
Update the properties of a subset
Parameters
----------
s: A subset instance
The subset to refresh.
"""
raise NotImplementedError("VizClient Cannot Draw!")
def init_layer(self, layer):
"""Initialize a plot of a data or subset object for the first time.
Parameters
----------
layer: Data or subset instance
"""
raise NotImplementedError()
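# Illustrative sketch (not part of glue): a minimal VizClient subclass only has to
# supply the five hooks listed in the class docstring; refresh() then drives them.
#
# class PrintingVizClient(VizClient):
#     def _redraw(self):
#         print("redraw")
#     def _update_axis_labels(self):
#         print("update axis labels")
#     def _update_data_plot(self):
#         print("update data plot")
#     def _update_subset_single(self, s, redraw=False):
#         print("update subset", s)
#     def init_layer(self, layer):
#         print("init layer", layer)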
def init_mpl(figure, axes, wcs=False, axes_factory=None):
if axes is not None and figure is not None and \
axes.figure is not figure:
raise ValueError("Axes and figure are incompatible")
try:
from ..external.wcsaxes import WCSAxesSubplot
except ImportError:
WCSAxesSubplot = None
if axes is not None:
_axes = axes
_figure = axes.figure
else:
_figure = figure or plt.figure()
if wcs and WCSAxesSubplot is not None:
_axes = WCSAxesSubplot(_figure, 111)
_figure.add_axes(_axes)
else:
if axes_factory is not None:
_axes = axes_factory(_figure)
else:
_axes = _figure.add_subplot(1, 1, 1)
try:
_figure.set_tight_layout(True)
except AttributeError: # matplotlib < 1.1
pass
return _figure, _axes
class GenericMplClient(Client):
"""
This client base class handles the logic of adding, removing,
and updating layers.
Subsets are auto-added and removed with datasets.
New subsets are auto-added iff the data has already been added
"""
def __init__(self, data=None, figure=None, axes=None,
artist_container=None, axes_factory=None):
super(GenericMplClient, self).__init__(data=data)
if axes_factory is None:
axes_factory = self.create_axes
figure, self.axes = init_mpl(figure, axes, axes_factory=axes_factory)
self.artists = artist_container
if self.artists is None:
self.artists = LayerArtistContainer()
self._connect()
def create_axes(self, figure):
return figure.add_subplot(1, 1, 1)
def _connect(self):
pass
@property
def collect(self):
# a better name
return self.data
def _redraw(self):
self.axes.figure.canvas.draw()
def new_layer_artist(self, layer):
raise NotImplementedError
def apply_roi(self, roi):
raise NotImplementedError
def _update_layer(self, layer):
raise NotImplementedError
def add_layer(self, layer):
"""
Add a new Data or Subset layer to the plot.
Returns the created layer artist
:param layer: The layer to add
:type layer: :class:`~glue.core.data.Data` or :class:`~glue.core.subset.Subset`
"""
if layer.data not in self.collect:
return
if layer in self.artists:
return self.artists[layer][0]
result = self.new_layer_artist(layer)
self.artists.append(result)
self._update_layer(layer)
self.add_layer(layer.data)
for s in layer.data.subsets:
self.add_layer(s)
if layer.data is layer: # Added Data object. Relimit view
self.axes.autoscale_view(True, True, True)
return result
def remove_layer(self, layer):
if layer not in self.artists:
return
self.artists.pop(layer)
if isinstance(layer, Data):
list(map(self.remove_layer, layer.subsets))
self._redraw()
def set_visible(self, layer, state):
"""
Toggle a layer's visibility
:param layer: which layer to modify
:param state: True or False
"""
def _update_all(self):
for layer in self.artists.layers:
self._update_layer(layer)
def __contains__(self, layer):
return layer in self.artists
# Hub message handling
def _add_subset(self, message):
self.add_layer(message.sender)
def _remove_subset(self, message):
self.remove_layer(message.sender)
def _update_subset(self, message):
self._update_layer(message.sender)
def _update_data(self, message):
self._update_layer(message.sender)
def _remove_data(self, message):
self.remove_layer(message.data)
def restore_layers(self, layers, context):
""" Re-generate plot layers from a glue-serialized list"""
for l in layers:
l.pop('_type')
props = dict((k, context.object(v)) for k, v in l.items())
layer = self.add_layer(props['layer'])
layer.properties = props
| {
"repo_name": "JudoWill/glue",
"path": "glue/clients/viz_client.py",
"copies": "1",
"size": "7813",
"license": "bsd-3-clause",
"hash": -6767077010396080000,
"line_mean": 25.9413793103,
"line_max": 87,
"alpha_frac": 0.5959298605,
"autogenerated": false,
"ratio": 4.257765667574932,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5353695528074932,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import matplotlib.pyplot as plt
from glue.core import Data
from glue.core.message import SettingsChangeMessage
from glue.core.client import Client
from glue.core.layer_artist import LayerArtistContainer
from glue.utils.matplotlib import freeze_margins
__all__ = ['VizClient', 'GenericMplClient']
class VizClient(Client):
"""
The VizClient class provides an interface (and minimal
implementation) for a generic client that creates
visualizations. The goal of VizClient is to provide a reusable way
to organize client plotting code.
Clients which extend VizClient should override the following methods
to perform specific visualization tasks
* _update_axis_labels
* _update_data_plot
* _update_subset_single
* _redraw
* init_layer
VizClient provides a public refresh() method that calls all of
these methods.
Attributes
----------
options: A dictionary of global plot options, to be handled by
subclasses.
"""
def __init__(self, data, options=None):
Client.__init__(self, data)
if not options:
self.options = {}
else:
self.options = options
def _add_data(self, message):
pass
def _remove_data(self, message):
pass
def _update_data(self, message):
"""
Method to handle messages sent by the dataset. Refreshes the display.
"""
self._update_data_plot()
self.refresh()
def _add_subset(self, message):
"""
Method to handle messages sent when subsets are created.
"""
s = message.subset
self.init_layer(s)
self._redraw()
def _update_subset(self, message):
"""
Method to handle messages sent when subsets are modified.
The plot properties of the modified subset are refreshed.
"""
s = message.subset
self._update_subset_single(s, redraw=True)
def refresh(self):
"""
Update and redraw all plot information.
"""
self._update_data_plot()
self._update_subset_plots()
self._update_axis_labels()
self._redraw()
def _redraw(self):
"""
Redraw, but do not update, plot information
"""
raise NotImplementedError("VizClient cannot draw!")
def _update_axis_labels(self):
"""
Sync the axis labels to reflect which components are
currently being plotted
"""
raise NotImplementedError("VizClient cannot draw!")
def _update_data_plot(self):
"""
Sync the location of the scatter points to
reflect what components are being plotted
"""
raise NotImplementedError("VizClient cannot draw!")
def _update_subset_plots(self, redraw=False):
"""
Sync the location and visual properties
of each point in each subset
"""
junk = [self._update_subset_single(s) for d in self.data
for s in d.subsets]
if redraw:
self._redraw()
def _update_subset_single(self, s, redraw=False):
"""
Update the properties of a subset
Parameters
----------
s: A subset instance
The subset to refresh.
"""
raise NotImplementedError("VizClient Cannot Draw!")
def init_layer(self, layer):
"""Initialize a plot of a data or subset object for the first time.
Parameters
----------
layer: Data or subset instance
"""
raise NotImplementedError()
def set_background_color(axes, color):
axes.figure.set_facecolor(color)
axes.patch.set_facecolor(color)
def set_foreground_color(axes, color):
if hasattr(axes, 'coords'):
axes.coords.frame.set_color(color)
for coord in axes.coords:
coord.set_ticks(color=color)
coord.set_ticklabel(color=color)
coord.axislabels.set_color(color)
else:
for spine in axes.spines.values():
spine.set_color(color)
axes.tick_params(color=color,
labelcolor=color)
axes.xaxis.label.set_color(color)
axes.yaxis.label.set_color(color)
def update_appearance_from_settings(axes):
from glue.config import settings
set_background_color(axes, settings.BACKGROUND_COLOR)
set_foreground_color(axes, settings.FOREGROUND_COLOR)
def init_mpl(figure=None, axes=None, wcs=False, axes_factory=None):
if (axes is not None and figure is not None and
axes.figure is not figure):
raise ValueError("Axes and figure are incompatible")
try:
from glue.external.wcsaxes import WCSAxesSubplot
except ImportError:
WCSAxesSubplot = None
if axes is not None:
_axes = axes
_figure = axes.figure
else:
_figure = figure or plt.figure()
if wcs and WCSAxesSubplot is not None:
_axes = WCSAxesSubplot(_figure, 111)
_figure.add_axes(_axes)
else:
if axes_factory is not None:
_axes = axes_factory(_figure)
else:
_axes = _figure.add_subplot(1, 1, 1)
freeze_margins(_axes, margins=[1, 0.25, 0.50, 0.25])
update_appearance_from_settings(_axes)
return _figure, _axes
class GenericMplClient(Client):
"""
This client base class handles the logic of adding, removing,
and updating layers.
Subsets are auto-added and removed with datasets.
New subsets are auto-added iff the data has already been added
"""
def __init__(self, data=None, figure=None, axes=None,
layer_artist_container=None, axes_factory=None):
super(GenericMplClient, self).__init__(data=data)
if axes_factory is None:
axes_factory = self.create_axes
figure, self.axes = init_mpl(figure, axes, axes_factory=axes_factory)
self.artists = layer_artist_container
if self.artists is None:
self.artists = LayerArtistContainer()
self._connect()
def create_axes(self, figure):
return figure.add_subplot(1, 1, 1)
def _connect(self):
pass
@property
def collect(self):
# a better name
return self.data
def _redraw(self):
self.axes.figure.canvas.draw()
def new_layer_artist(self, layer):
raise NotImplementedError
def apply_roi(self, roi):
raise NotImplementedError
def _update_layer(self, layer):
raise NotImplementedError
def add_layer(self, layer):
"""
Add a new Data or Subset layer to the plot.
Returns the created layer artist
:param layer: The layer to add
:type layer: :class:`~glue.core.data.Data` or :class:`~glue.core.subset.Subset`
"""
if layer.data not in self.collect:
return
if layer in self.artists:
return self.artists[layer][0]
result = self.new_layer_artist(layer)
self.artists.append(result)
self._update_layer(layer)
self.add_layer(layer.data)
for s in layer.data.subsets:
self.add_layer(s)
if layer.data is layer: # Added Data object. Relimit view
self.axes.autoscale_view(True, True, True)
return result
def remove_layer(self, layer):
if layer not in self.artists:
return
self.artists.pop(layer)
if isinstance(layer, Data):
list(map(self.remove_layer, layer.subsets))
self._redraw()
def set_visible(self, layer, state):
"""
Toggle a layer's visibility
:param layer: which layer to modify
:param state: True or False
"""
def _update_all(self):
for layer in self.artists.layers:
self._update_layer(layer)
def __contains__(self, layer):
return layer in self.artists
# Hub message handling
def _add_subset(self, message):
self.add_layer(message.sender)
def _remove_subset(self, message):
self.remove_layer(message.sender)
def _update_subset(self, message):
self._update_layer(message.sender)
def _update_data(self, message):
self._update_layer(message.sender)
def _remove_data(self, message):
self.remove_layer(message.data)
def register_to_hub(self, hub):
super(GenericMplClient, self).register_to_hub(hub)
def is_appearance_settings(msg):
return ('BACKGROUND_COLOR' in msg.settings
or 'FOREGROUND_COLOR' in msg.settings)
hub.subscribe(self, SettingsChangeMessage,
self._update_appearance_from_settings,
filter=is_appearance_settings)
def _update_appearance_from_settings(self, message):
update_appearance_from_settings(self.axes)
self._redraw()
def restore_layers(self, layers, context):
""" Re-generate plot layers from a glue-serialized list"""
for l in layers:
l.pop('_type')
props = dict((k, context.object(v)) for k, v in l.items())
layer = self.add_layer(props['layer'])
layer.properties = props
| {
"repo_name": "saimn/glue",
"path": "glue/viewers/common/viz_client.py",
"copies": "1",
"size": "9349",
"license": "bsd-3-clause",
"hash": 3889255167706819600,
"line_mean": 26.7418397626,
"line_max": 87,
"alpha_frac": 0.6069098299,
"autogenerated": false,
"ratio": 4.133068081343944,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5239977911243944,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import mimetypes
import uuid
from io import BytesIO
from twisted.internet.interfaces import IProtocol
from twisted.internet.defer import Deferred
from twisted.python.components import proxyForInterface
from twisted.python.compat import _PY3, unicode
from twisted.python.filepath import FilePath
from twisted.python.url import URL
from twisted.web.http import urlparse
from twisted.web.http_headers import Headers
from twisted.web.iweb import IBodyProducer, IResponse
from twisted.web.client import (
FileBodyProducer,
RedirectAgent,
BrowserLikeRedirectAgent,
ContentDecoderAgent,
GzipDecoder,
CookieAgent
)
from twisted.python.components import registerAdapter
from json import dumps as json_dumps
from treq._utils import default_reactor
from treq.auth import add_auth
from treq import multipart
from treq.response import _Response
from requests.cookies import cookiejar_from_dict, merge_cookies
if _PY3:
from urllib.parse import urlunparse, urlencode as _urlencode
def urlencode(query, doseq):
return _urlencode(query, doseq).encode('ascii')
from http.cookiejar import CookieJar
else:
from cookielib import CookieJar
from urlparse import urlunparse
from urllib import urlencode
class _BodyBufferingProtocol(proxyForInterface(IProtocol)):
def __init__(self, original, buffer, finished):
self.original = original
self.buffer = buffer
self.finished = finished
def dataReceived(self, data):
self.buffer.append(data)
self.original.dataReceived(data)
def connectionLost(self, reason):
self.original.connectionLost(reason)
self.finished.errback(reason)
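# _BufferedResponse below records every delivered body segment so that deliverBody()
# can safely be called more than once: late subscribers are replayed the buffered
# data and then notified of the original termination reason.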
class _BufferedResponse(proxyForInterface(IResponse)):
def __init__(self, original):
self.original = original
self._buffer = []
self._waiters = []
self._waiting = None
self._finished = False
self._reason = None
def _deliverWaiting(self, reason):
self._reason = reason
self._finished = True
for waiter in self._waiters:
for segment in self._buffer:
waiter.dataReceived(segment)
waiter.connectionLost(reason)
def deliverBody(self, protocol):
if self._waiting is None and not self._finished:
self._waiting = Deferred()
self._waiting.addBoth(self._deliverWaiting)
self.original.deliverBody(
_BodyBufferingProtocol(
protocol,
self._buffer,
self._waiting
)
)
elif self._finished:
for segment in self._buffer:
protocol.dataReceived(segment)
protocol.connectionLost(self._reason)
else:
self._waiters.append(protocol)
class HTTPClient(object):
def __init__(self, agent, cookiejar=None,
data_to_body_producer=IBodyProducer):
self._agent = agent
self._cookiejar = cookiejar or cookiejar_from_dict({})
self._data_to_body_producer = data_to_body_producer
def get(self, url, **kwargs):
"""
See :func:`treq.get()`.
"""
return self.request('GET', url, **kwargs)
def put(self, url, data=None, **kwargs):
"""
See :func:`treq.put()`.
"""
return self.request('PUT', url, data=data, **kwargs)
def patch(self, url, data=None, **kwargs):
"""
See :func:`treq.patch()`.
"""
return self.request('PATCH', url, data=data, **kwargs)
def post(self, url, data=None, **kwargs):
"""
See :func:`treq.post()`.
"""
return self.request('POST', url, data=data, **kwargs)
def head(self, url, **kwargs):
"""
See :func:`treq.head()`.
"""
return self.request('HEAD', url, **kwargs)
def delete(self, url, **kwargs):
"""
See :func:`treq.delete()`.
"""
return self.request('DELETE', url, **kwargs)
def request(self, method, url, **kwargs):
"""
See :func:`treq.request()`.
"""
method = method.encode('ascii').upper()
# Join parameters provided in the URL
# and the ones passed as arguments.
params = kwargs.get('params')
if params:
url = _combine_query_params(url, params)
if isinstance(url, unicode):
url = URL.fromText(url).asURI().asText().encode('ascii')
# Convert headers dictionary to
# twisted raw headers format.
headers = kwargs.get('headers')
if headers:
if isinstance(headers, dict):
h = Headers({})
for k, v in headers.items():
if isinstance(v, (bytes, unicode)):
h.addRawHeader(k, v)
elif isinstance(v, list):
h.setRawHeaders(k, v)
headers = h
else:
headers = Headers({})
# Here we choose the right body producer
# based on the parameters passed in.
bodyProducer = None
data = kwargs.get('data')
files = kwargs.get('files')
# since json=None needs to be serialized as 'null', we need to
# explicitly check kwargs for this key
has_json = 'json' in kwargs
if files:
# If the files keyword is present we will issue a
# multipart/form-data request, as it is better suited for
# requests containing files and/or large objects.
files = list(_convert_files(files))
boundary = str(uuid.uuid4()).encode('ascii')
headers.setRawHeaders(
b'content-type', [
b'multipart/form-data; boundary=' + boundary])
if data:
data = _convert_params(data)
else:
data = []
bodyProducer = multipart.MultiPartProducer(
data + files, boundary=boundary)
elif data:
# Otherwise stick to x-www-form-urlencoded format
# as it's generally faster for smaller requests.
if isinstance(data, (dict, list, tuple)):
headers.setRawHeaders(
b'content-type', [b'application/x-www-form-urlencoded'])
data = urlencode(data, doseq=True)
bodyProducer = self._data_to_body_producer(data)
elif has_json:
# If data is sent as JSON, set the Content-Type to 'application/json'
headers.setRawHeaders(
b'content-type', [b'application/json; charset=UTF-8'])
content = kwargs['json']
json = json_dumps(content, separators=(u',', u':')).encode('utf-8')
bodyProducer = self._data_to_body_producer(json)
cookies = kwargs.get('cookies', {})
if not isinstance(cookies, CookieJar):
cookies = cookiejar_from_dict(cookies)
cookies = merge_cookies(self._cookiejar, cookies)
wrapped_agent = CookieAgent(self._agent, cookies)
if kwargs.get('allow_redirects', True):
if kwargs.get('browser_like_redirects', False):
wrapped_agent = BrowserLikeRedirectAgent(wrapped_agent)
else:
wrapped_agent = RedirectAgent(wrapped_agent)
wrapped_agent = ContentDecoderAgent(wrapped_agent,
[(b'gzip', GzipDecoder)])
auth = kwargs.get('auth')
if auth:
wrapped_agent = add_auth(wrapped_agent, auth)
d = wrapped_agent.request(
method, url, headers=headers,
bodyProducer=bodyProducer)
timeout = kwargs.get('timeout')
if timeout:
delayedCall = default_reactor(kwargs.get('reactor')).callLater(
timeout, d.cancel)
def gotResult(result):
if delayedCall.active():
delayedCall.cancel()
return result
d.addBoth(gotResult)
if not kwargs.get('unbuffered', False):
d.addCallback(_BufferedResponse)
return d.addCallback(_Response, cookies)
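# Illustrative usage sketch (assumes a running Twisted reactor; not part of this module):
#
# from twisted.internet import reactor
# from twisted.web.client import Agent
#
# client = HTTPClient(Agent(reactor))
# d = client.get('https://example.com', params={'q': 'treq'}, timeout=5)
# d.addCallback(lambda response: response.code)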
def _convert_params(params):
if hasattr(params, "iteritems"):
return list(sorted(params.iteritems()))
elif hasattr(params, "items"):
return list(sorted(params.items()))
elif isinstance(params, (tuple, list)):
return list(params)
else:
raise ValueError("Unsupported format")
def _convert_files(files):
"""Files can be passed in a variety of formats:
* {'file': open("bla.f")}
* {'file': (name, open("bla.f"))}
* {'file': (name, content-type, open("bla.f"))}
* Anything that has an iteritems method, e.g. MultiDict:
MultiDict([(name, open()), (name, open())])
Our goal is to standardize it to the unified form of:
* [(param, (file name, content type, producer))]
"""
if hasattr(files, "iteritems"):
files = files.iteritems()
elif hasattr(files, "items"):
files = files.items()
for param, val in files:
file_name, content_type, fobj = (None, None, None)
if isinstance(val, tuple):
if len(val) == 2:
file_name, fobj = val
elif len(val) == 3:
file_name, content_type, fobj = val
else:
fobj = val
if hasattr(fobj, "name"):
file_name = FilePath(fobj.name).basename()
if not content_type:
content_type = _guess_content_type(file_name)
yield (param, (file_name, content_type, IBodyProducer(fobj)))
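# Example (sketch): {'file': ('data.csv', open('data.csv', 'rb'))} is normalised to
# [('file', ('data.csv', guessed_content_type, <IBodyProducer adapter>))], where the
# content type is guessed from the file name (typically 'text/csv' here) when not given.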
def _combine_query_params(url, params):
parsed_url = urlparse(url.encode('ascii'))
qs = []
if parsed_url.query:
qs.extend([parsed_url.query, b'&'])
qs.append(urlencode(params, doseq=True))
return urlunparse((parsed_url[0], parsed_url[1],
parsed_url[2], parsed_url[3],
b''.join(qs), parsed_url[5]))
def _from_bytes(orig_bytes):
return FileBodyProducer(BytesIO(orig_bytes))
def _from_file(orig_file):
return FileBodyProducer(orig_file)
def _guess_content_type(filename):
if filename:
guessed = mimetypes.guess_type(filename)[0]
else:
guessed = None
return guessed or 'application/octet-stream'
registerAdapter(_from_bytes, bytes, IBodyProducer)
registerAdapter(_from_file, BytesIO, IBodyProducer)
if not _PY3:
from StringIO import StringIO
registerAdapter(_from_file, StringIO, IBodyProducer)
registerAdapter(_from_file, file, IBodyProducer)
else:
import io
# file()/open() equiv on Py3
registerAdapter(_from_file, io.BufferedReader, IBodyProducer)
| {
"repo_name": "pexip/os-python-treq",
"path": "src/treq/client.py",
"copies": "1",
"size": "10866",
"license": "mit",
"hash": 1692725451840600800,
"line_mean": 30.224137931,
"line_max": 79,
"alpha_frac": 0.5869685257,
"autogenerated": false,
"ratio": 4.2695481335952845,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5356516659295284,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import mock
import pytest
import workflows.contrib.start_service
def test_get_command_line_help(capsys):
'''Running the start_service script with --help should display command line help and exit.'''
with pytest.raises(SystemExit):
workflows.contrib.start_service.ServiceStarter().run(['--help'], program_name='sentinelvalue')
out, err = capsys.readouterr()
assert 'Usage: sentinelvalue' in out
@mock.patch('workflows.contrib.start_service.OptionParser')
@mock.patch('workflows.contrib.start_service.workflows.transport.lookup')
@mock.patch('workflows.contrib.start_service.workflows.frontend')
@mock.patch('workflows.contrib.start_service.workflows.services')
def test_script_initialises_transport_and_starts_frontend(mock_services, mock_frontend, mock_tlookup, mock_parser):
'''Check that the start_service script sets up the transport mechanism and the frontend properly.
The correct service should be selected and the frontend started.'''
mock_options = mock.Mock()
mock_options.service = 'someservice'
mock_options.transport = mock.sentinel.transport
mock_parser.return_value.parse_args.return_value = (mock_options, mock.Mock())
mock_services.get_known_services.return_value = { 'SomeService': None }
workflows.contrib.start_service.ServiceStarter().run(cmdline_args=['-s', 'some'], version=mock.sentinel.version)
mock_tlookup.assert_called_once_with(mock.sentinel.transport)
mock_parser.assert_called_once_with(usage=mock.ANY, version=mock.sentinel.version)
mock_frontend.Frontend.assert_called_once_with(service='SomeService', transport=mock_tlookup.return_value)
mock_frontend.Frontend.return_value.run.assert_called_once_with()
| {
"repo_name": "xia2/workflows",
"path": "workflows/contrib/test_start_service.py",
"copies": "1",
"size": "1742",
"license": "bsd-3-clause",
"hash": 8887572806167714000,
"line_mean": 53.4375,
"line_max": 115,
"alpha_frac": 0.7778415614,
"autogenerated": false,
"ratio": 3.6751054852320677,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49529470466320674,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import mock
import pytest
import workflows
import workflows.recipe
def check_message_handling_via_unwrapper(callback, recipient, transport, rw_mock, allow_non_recipe):
'''Test callback function of a recipe wrapper.'''
# This message does not contain an encoded recipe. It should be passed through directly.
header = { 'random-header': mock.sentinel.ID }
message = mock.Mock()
recipient.reset_mock()
transport.reset_mock()
callback(header, message)
if allow_non_recipe:
recipient.assert_called_once_with(None, header, message)
transport.nack.assert_not_called()
else:
transport.nack.assert_called_once_with(header)
recipient.assert_not_called()
# This message does not contain an encoded recipe. It should be passed through directly.
header = { 'workflows-recipe': "False" }
recipient.reset_mock()
transport.reset_mock()
callback(header, message)
if allow_non_recipe:
recipient.assert_called_once_with(None, header, message)
transport.nack.assert_not_called()
else:
transport.nack.assert_called_once_with(header)
recipient.assert_not_called()
# This message contains an encoded recipe. It should be interpreted and the payload passed
# through with a helper object for simple recipe-conformant replies.
header = { 'workflows-recipe': "True" }
message = {
'recipe': mock.sentinel.recipe,
'recipe-pointer': mock.sentinel.recipe_pointer,
'recipe-path': [],
'environment': {
'ID': mock.sentinel.GUID,
'source': mock.sentinel.source,
'timestamp': mock.sentinel.timestamp,
},
'payload': mock.sentinel.payload,
}
recipient.reset_mock()
transport.reset_mock()
callback(header, message)
recipient.assert_called_once_with(rw_mock.return_value, header, message['payload'])
rw_mock.assert_called_once_with(message=message, transport=transport)
transport.nack.assert_not_called()
@mock.patch('workflows.recipe.RecipeWrapper', autospec=True)
def test_wrapping_a_subscription(rw_mock):
'''Test queue subscription with recipe wrapper.'''
transport, recipient = mock.Mock(), mock.Mock()
sid = workflows.recipe.wrap_subscribe(transport, mock.sentinel.channel, recipient,
mock.sentinel.irrelevant_extra_arg, keyword=mock.sentinel.keyword_arg)
# Channel and any extra arguments must be passed on to transport layer.
# Callback function will obviously change.
transport.subscribe.assert_called_once_with(mock.sentinel.channel, mock.ANY,
mock.sentinel.irrelevant_extra_arg, keyword=mock.sentinel.keyword_arg)
callback = transport.subscribe.call_args[0][1]
assert callback != recipient
assert sid == transport.subscribe.return_value
# Part II: Message handling via unwrapper
check_message_handling_via_unwrapper(callback, recipient, transport, rw_mock, False)
@mock.patch('workflows.recipe.RecipeWrapper', autospec=True)
def test_wrapping_a_subscription_allowing_non_recipe_messages(rw_mock):
'''Test queue subscription with recipe wrapper allowing non-recipe messages to pass through.'''
transport, recipient = mock.Mock(), mock.Mock()
sid = workflows.recipe.wrap_subscribe(transport, mock.sentinel.channel, recipient,
mock.sentinel.irrelevant_extra_arg, keyword=mock.sentinel.keyword_arg,
allow_non_recipe_messages=True)
transport.subscribe.assert_called_once()
callback = transport.subscribe.call_args[0][1]
assert sid == transport.subscribe.return_value
# Part II: Message handling via unwrapper
check_message_handling_via_unwrapper(callback, recipient, transport, rw_mock, True)
@mock.patch('workflows.recipe.RecipeWrapper', autospec=True)
def test_wrapping_a_broadcast_subscription(rw_mock):
'''Test topic subscription with recipe wrapper.'''
transport, recipient = mock.Mock(), mock.Mock()
sid = workflows.recipe.wrap_subscribe_broadcast(transport, mock.sentinel.channel, recipient,
mock.sentinel.irrelevant_extra_arg, keyword=mock.sentinel.keyword_arg)
# Channel and any extra arguments must be passed on to transport layer.
# Callback function will obviously change.
transport.subscribe_broadcast.assert_called_once_with(mock.sentinel.channel, mock.ANY,
mock.sentinel.irrelevant_extra_arg, keyword=mock.sentinel.keyword_arg)
callback = transport.subscribe_broadcast.call_args[0][1]
assert callback != recipient
assert sid == transport.subscribe_broadcast.return_value
# Part II: Message handling via unwrapper
check_message_handling_via_unwrapper(callback, recipient, transport, rw_mock, False)
def test_wrapping_a_subscription_with_log_extension():
'''Test queue subscription with recipe wrapper, passing a log_extender function.
If the recipe contains useful contextual information for log messages,
such as a unique ID which can be used to connect all messages originating
from the same recipe, then this information should be passed to the
log_extender function.'''
transport, lext = mock.Mock(), mock.Mock()
# Set up context manager mock
lext.return_value.__enter__ = lext.enter
lext.return_value.__exit__ = lext.exit
def recipient(*args, **kwargs):
'''Dummy function accepting anything; it must be run inside the log_extender context.'''
lext.enter.assert_called_once()
lext.exit.assert_not_called()
lext.recipient()
sid = workflows.recipe.wrap_subscribe(transport, mock.sentinel.channel, recipient,
log_extender=lext)
# Channel and any extra arguments must be passed on to transport layer.
# Callback function will obviously change.
transport.subscribe.assert_called_once_with(mock.sentinel.channel, mock.ANY)
callback = transport.subscribe.call_args[0][1]
assert callback != recipient
assert sid == transport.subscribe.return_value
# Part II: Message handling
# This message does not contain an encoded recipe. It should be passed through directly.
header = { 'random-header': mock.sentinel.ID }
callback(header, mock.Mock())
lext.assert_not_called()
# This message does not contain an encoded recipe. It should be passed through directly.
header = { 'workflows-recipe': "False" }
callback(header, mock.Mock())
lext.assert_not_called()
# This message contains an encoded recipe. The environment ID should be passed to the
# log_extender context manager.
header = { 'workflows-recipe': "True" }
message = {
'recipe': { 1: {} },
'recipe-pointer': 1,
'recipe-path': [],
'environment': {
'ID': mock.sentinel.GUID,
'source': mock.sentinel.source,
'timestamp': mock.sentinel.timestamp,
},
'payload': mock.sentinel.payload,
}
callback(header, message)
lext.assert_called_once_with('recipe_ID', mock.sentinel.GUID)
lext.enter.assert_called_once()
lext.exit.assert_called_once()
lext.recipient.assert_called_once()
| {
"repo_name": "xia2/workflows",
"path": "workflows/recipe/test_wrap_subscription.py",
"copies": "1",
"size": "6961",
"license": "bsd-3-clause",
"hash": -6063198979366495000,
"line_mean": 39.9470588235,
"line_max": 100,
"alpha_frac": 0.7251831633,
"autogenerated": false,
"ratio": 3.88015607580825,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.510533923910825,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import mock
import workflows.contrib.status_monitor as status_monitor
@mock.patch('workflows.contrib.status_monitor.curses')
@mock.patch('workflows.contrib.status_monitor.time')
@mock.patch('workflows.contrib.status_monitor.workflows.transport')
def test_status_monitor_connects_to_transport_layer(mock_transport, mock_time, mock_curses):
'''Check that the status monitor properly connects to the transport layer and sets up relevant subscriptions.'''
mock_time.sleep.side_effect = KeyboardInterrupt()
status_monitor.Monitor()
mock_transport.lookup.assert_called_once_with(None)
mock_transport.lookup.return_value.assert_called_once_with()
mock_transport.lookup.return_value.return_value.connect.assert_called_once_with()
status_monitor.Monitor(transport="some_transport")
mock_transport.lookup.assert_called_with("some_transport")
mock_trn = mock.Mock()
mon = status_monitor.Monitor(mock_trn)
mock_trn.assert_called_once_with()
mock_trn.return_value.connect.assert_called_once_with()
mock_trn.return_value.subscribe_broadcast.assert_called_once_with(mock.ANY, mon.update_status, retroactive=True)
mon.run()
mock_curses.wrapper.assert_called_once()
run_method = mock_curses.wrapper.call_args[0][0]
run_method(mock.Mock())
mock_trn.return_value.disconnect.assert_called_once()
| {
"repo_name": "xia2/workflows",
"path": "workflows/contrib/test_status_monitor.py",
"copies": "1",
"size": "1385",
"license": "bsd-3-clause",
"hash": -8902298015370388000,
"line_mean": 40.9696969697,
"line_max": 114,
"alpha_frac": 0.7761732852,
"autogenerated": false,
"ratio": 3.4282178217821784,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47043911069821787,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import mock
import workflows.frontend.utilization
from workflows.services.common_service import Status
def about(value, tolerance):
'''Create an object that can be compared against a number and allows a tolerance.'''
class Comparator():
'''A helper class to compare against a value with a tolerance.'''
def __le__(self, other):
return other >= value-tolerance
def __eq__(self, other):
return value-tolerance <= other <= value+tolerance
def __ge__(self, other):
return other <= value+tolerance
def __ne__(self, other):
return not self.__eq__(other)
def __repr__(self):
return "<%s +- %s>" % (str(value), str(tolerance))
return Comparator()
def test_near_equality_helper_class():
'''Quick test of the above helper class.'''
three = about(3, 0.11)
assert 2 <= three
assert 2 != three
assert 2.8 != three
assert 2.9 == three
assert 3.0 == three
assert 3 == three
assert 3.1 == three
assert 3.2 != three
assert 4 != three
assert 4 >= three
def test_get_empty_statistics_report():
'''Create a UtilizationStatistics object and get the initial report. It should report being in the 'NEW' status 100% of the time.'''
stat = workflows.frontend.utilization.UtilizationStatistics(summation_period=10)
assert stat.report() == { Status.NEW.intval: 1.0 }
@mock.patch('workflows.frontend.utilization.time')
def test_statistics_report_contains_correctly_aggregated_information(t):
'''Create a UtilizationStatistics object and feed in various status changes. Check that the aggregated reports correspond to expected values.'''
t.time.return_value = 100000
stat = workflows.frontend.utilization.UtilizationStatistics(summation_period=10)
t.time.return_value = 100005
assert stat.report() == { Status.NEW.intval: 1.0 }
stat.update_status( Status.IDLE.intval )
t.time.return_value = 100010
assert stat.report() == { Status.NEW.intval: about(0.5, 0.01), Status.IDLE.intval: about(0.5, 0.01) }
stat.update_status( 127 )
t.time.return_value = 100012
assert stat.report() == { Status.NEW.intval: about(0.3, 0.01), Status.IDLE.intval: about(0.5, 0.01), 127: about(0.2, 0.01) }
stat.update_status( Status.IDLE.intval )
t.time.return_value = 100013
assert stat.report() == { Status.NEW.intval: about(0.2, 0.01), Status.IDLE.intval: about(0.6, 0.01), 127: about(0.2, 0.01) }
stat.update_status( 128 )
t.time.return_value = 100016
assert stat.report() == { Status.IDLE.intval: about(0.5, 0.01), 127: about(0.2, 0.01), 128: about(0.3, 0.01) }
t.time.return_value = 100017
assert stat.report() == { Status.IDLE.intval: about(0.4, 0.01), 127: about(0.2, 0.01), 128: about(0.4, 0.01) }
t.time.return_value = 100018
assert stat.report() == { Status.IDLE.intval: about(0.3, 0.01), 127: about(0.2, 0.01), 128: about(0.5, 0.01) }
t.time.return_value = 100019
assert stat.report() == { Status.IDLE.intval: about(0.2, 0.01), 127: about(0.2, 0.01), 128: about(0.6, 0.01) }
t.time.return_value = 100020
assert stat.report() == { Status.IDLE.intval: about(0.1, 0.01), 127: about(0.2, 0.01), 128: about(0.7, 0.01) }
t.time.return_value = 100021
assert stat.report() == { Status.IDLE.intval: about(0.1, 0.01), 127: about(0.1, 0.01), 128: about(0.8, 0.01) }
t.time.return_value = 100022
assert stat.report() == { Status.IDLE.intval: about(0.1, 0.01), 127: about(0.0, 0.01), 128: about(0.9, 0.01) }
t.time.return_value = 100022.001
assert stat.report() == { Status.IDLE.intval: about(0.1, 0.01), 128: about(0.9, 0.01) }
t.time.return_value = 100023
assert stat.report() == { Status.IDLE.intval: about(0.0, 0.01), 128: about(1.0, 0.01) }
t.time.return_value = 100023.001
assert stat.report() == { 128: 1.0 }
| {
"repo_name": "xia2/workflows",
"path": "workflows/frontend/test_utilization.py",
"copies": "1",
"size": "3800",
"license": "bsd-3-clause",
"hash": -4043378553939797000,
"line_mean": 47.7179487179,
"line_max": 146,
"alpha_frac": 0.6684210526,
"autogenerated": false,
"ratio": 2.9641185647425896,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.41325396173425893,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import mock
import workflows.services
import workflows.services.sample_producer
def test_service_can_be_looked_up():
'''Attempt to look up the service by its name'''
service_class = workflows.services.lookup('SampleProducer')
assert service_class == workflows.services.sample_producer.SampleProducer
def test_service_registers_idle_timer():
'''Check that the service registers an idle event handler.'''
p = workflows.services.sample_producer.SampleProducer()
mock_idlereg = mock.Mock()
setattr(p, '_register_idle', mock_idlereg)
p.initializing()
mock_idlereg.assert_called_once_with(mock.ANY, p.create_message)
def test_service_produces_messages():
'''Check that the producer produces messages in the idle event handler.'''
p = workflows.services.sample_producer.SampleProducer()
mock_transport = mock.Mock()
setattr(p, '_transport', mock_transport)
p.initializing()
assert not mock_transport.send.called
p.create_message()
mock_transport.send.assert_called_once()
p.create_message()
assert mock_transport.send.call_count == 2
calls = mock_transport.send.call_args_list
assert calls[0][0][0] == calls[1][0][0] # same destination
assert calls[0][0][1] != calls[1][0][1] # different message
| {
"repo_name": "xia2/workflows",
"path": "workflows/services/test_sample_service_producer.py",
"copies": "1",
"size": "1308",
"license": "bsd-3-clause",
"hash": -4093129710211143000,
"line_mean": 32.5384615385,
"line_max": 76,
"alpha_frac": 0.7377675841,
"autogenerated": false,
"ratio": 3.5835616438356164,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4821329227935617,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import mock
import workflows.services
import workflows.services.sample_transaction
def test_services_can_be_looked_up():
'''Attempt to look up the services by their names'''
service_class = workflows.services.lookup('SampleTxn')
assert service_class == workflows.services.sample_transaction.SampleTxn
service_class = workflows.services.lookup('SampleTxnProducer')
assert service_class == workflows.services.sample_transaction.SampleTxnProducer
def test_txnproducer_registers_idle_timer():
'''Check that the TXN producer registers an idle event handler.'''
p = workflows.services.sample_transaction.SampleTxnProducer()
mock_idlereg = mock.Mock()
setattr(p, '_register_idle', mock_idlereg)
p.initializing()
mock_idlereg.assert_called_once_with(mock.ANY, p.create_message)
def test_txnproducer_produces_messages():
'''Check that the TXN producer produces messages in the idle event handler.'''
p = workflows.services.sample_transaction.SampleTxnProducer()
mock_transport = mock.Mock()
setattr(p, '_transport', mock_transport)
p.initializing()
assert not mock_transport.send.called
p.create_message()
mock_transport.send.assert_called_once()
p.create_message()
assert mock_transport.send.call_count == 2
calls = mock_transport.send.call_args_list
assert calls[0][0][0] == calls[1][0][0] # same destination
assert calls[0][0][1] != calls[1][0][1] # different message
def test_txnservice_subscribes_to_channel():
'''Check that the service subscribes to a queue with acknowledgements enabled.'''
p = workflows.services.sample_transaction.SampleTxn()
mock_transport = mock.Mock()
setattr(p, '_transport', mock_transport)
p.initializing()
mock_transport.subscribe.assert_called_once_with(mock.ANY, p.receive_message, acknowledgement=True)
def test_txnservice_crash_function_crashes_sometimes():
'''The crash should happen sometimes. Neither never nor always.'''
fn = workflows.services.sample_transaction.SampleTxn.crashpoint
assert any(fn() for i in range(100))
assert not all(fn() for i in range(100))
def setup_txnservice(crashpattern):
'''Common fixture for TXN tests'''
p = workflows.services.sample_transaction.SampleTxn()
mock_crash, mock_transport = mock.Mock(), mock.Mock()
p.crashpoint = mock_crash
mock_crash.side_effect = crashpattern
mock_transport.transaction_begin.return_value = mock.sentinel.txn
mock_transport.subscribe.return_value = mock.sentinel.subid
setattr(p, '_transport', mock_transport)
header = { 'message-id': mock.sentinel.message_id }
message = mock.sentinel.message
p.initializing()
p.receive_message(header, message)
return p, mock_transport
def test_txnservice_uses_transactions_correctly():
'''The TXN service should consume messages in a transaction. When the service fails, the messages must not be consumed.'''
p, mock_transport = setup_txnservice([True])
mock_transport.transaction_begin.assert_called_once_with()
mock_transport.ack.assert_not_called()
mock_transport.send.assert_not_called()
mock_transport.transaction_commit.assert_not_called()
mock_transport.transaction_abort.assert_called_once_with(mock.sentinel.txn)
p, mock_transport = setup_txnservice([False, True])
mock_transport.transaction_begin.assert_called_once_with()
mock_transport.ack.assert_called_once_with(mock.sentinel.message_id, mock.sentinel.subid, transaction=mock.sentinel.txn)
mock_transport.send.assert_not_called()
mock_transport.transaction_commit.assert_not_called()
mock_transport.transaction_abort.assert_called_once_with(mock.sentinel.txn)
p, mock_transport = setup_txnservice([False, False, True])
mock_transport.transaction_begin.assert_called_once_with()
mock_transport.ack.assert_called_once_with(mock.sentinel.message_id, mock.sentinel.subid, transaction=mock.sentinel.txn)
mock_transport.send.assert_called_once_with(mock.ANY, mock.sentinel.message, transaction=mock.sentinel.txn)
mock_transport.transaction_commit.assert_not_called()
mock_transport.transaction_abort.assert_called_once_with(mock.sentinel.txn)
p, mock_transport = setup_txnservice([False, False, False])
mock_transport.transaction_begin.assert_called_once_with()
mock_transport.ack.assert_called_once_with(mock.sentinel.message_id, mock.sentinel.subid, transaction=mock.sentinel.txn)
mock_transport.send.assert_called_once_with(mock.ANY, mock.sentinel.message, transaction=mock.sentinel.txn)
mock_transport.transaction_commit.assert_called_once_with(mock.sentinel.txn)
mock_transport.transaction_abort.assert_not_called()
| {
"repo_name": "xia2/workflows",
"path": "workflows/services/test_sample_service_transaction.py",
"copies": "1",
"size": "4644",
"license": "bsd-3-clause",
"hash": 4286979759727570000,
"line_mean": 41.6055045872,
"line_max": 123,
"alpha_frac": 0.7652885444,
"autogenerated": false,
"ratio": 3.5342465753424657,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47995351197424657,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import multiprocessing as mp
import numpy as np
from numba import cuda
def parent():
arr = np.arange(10)
darr = cuda.to_device(arr)
ipch = darr.get_ipc_handle()
# launch child proc
mpc = mp.get_context('spawn')
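# 'spawn' (rather than 'fork') is used so the child starts a fresh process;
# a forked child cannot safely reuse the parent's CUDA context.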
queue = mpc.Queue()
childproc = mpc.Process(target=child, args=[queue])
childproc.start()
queue.put(ipch)
childproc.join(1)
hostarr = queue.get()
print('original array:', arr)
# device array is modified by child process
print('device array:', darr.copy_to_host())
print('returned host array', hostarr)
# verify
np.testing.assert_equal(darr.copy_to_host(), arr + 1)
np.testing.assert_equal(hostarr, arr * 2)
@cuda.jit
def plus1(arr):
i = cuda.grid(1)
if i < arr.size:
arr[i] += 1
def child(queue):
ipch = queue.get()
with ipch as darr:
# keep a copy
arr = darr.copy_to_host()
# modify host array
arr *= 2
# modify device array directly
plus1[(darr.size + 64 - 1) // 64, 64](darr)
# send host array back
queue.put(arr)
def main():
parent()
if __name__ == '__main__':
main()
| {
"repo_name": "cpcloud/numba",
"path": "examples/cuda_ipc.py",
"copies": "2",
"size": "1225",
"license": "bsd-2-clause",
"hash": -5069240495452699000,
"line_mean": 19.4166666667,
"line_max": 64,
"alpha_frac": 0.5991836735,
"autogenerated": false,
"ratio": 3.2666666666666666,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9865850340166666,
"avg_score": 0,
"num_lines": 60
} |
from __future__ import absolute_import, division, print_function
import multiprocessing
import pickle
import sys
from .async import get_async # TODO: get better get
from .context import _globals
from .optimize import fuse, cull
import cloudpickle
from toolz import curry
if sys.version_info.major < 3:
import copy_reg as copyreg
else:
import copyreg
def _reduce_method_descriptor(m):
return getattr, (m.__objclass__, m.__name__)
# type(set.union) is used as a proxy to <class 'method_descriptor'>
copyreg.pickle(type(set.union), _reduce_method_descriptor)
def _dumps(x):
return cloudpickle.dumps(x, protocol=pickle.HIGHEST_PROTOCOL)
_loads = pickle.loads
def _process_get_id():
return multiprocessing.current_process().ident
def get(dsk, keys, num_workers=None, func_loads=None, func_dumps=None,
optimize_graph=True, **kwargs):
""" Multiprocessed get function appropriate for Bags
Parameters
----------
dsk : dict
dask graph
keys : object or list
Desired results from graph
num_workers : int
Number of worker processes (defaults to number of cores)
func_dumps : function
Function to use for function serialization
(defaults to cloudpickle.dumps)
func_loads : function
Function to use for function deserialization
(defaults to cloudpickle.loads)
optimize_graph : bool
If True [default], `fuse` is applied to the graph before computation.
"""
pool = _globals['pool']
if pool is None:
pool = multiprocessing.Pool(num_workers)
cleanup = True
else:
cleanup = False
manager = multiprocessing.Manager()
queue = manager.Queue()
apply_async = pickle_apply_async(pool.apply_async,
func_dumps=func_dumps,
func_loads=func_loads)
# Optimize Dask
dsk2, dependencies = cull(dsk, keys)
if optimize_graph:
dsk3, dependencies = fuse(dsk2, keys, dependencies)
else:
dsk3 = dsk2
try:
# Run
result = get_async(apply_async, len(pool._pool), dsk3, keys,
queue=queue, get_id=_process_get_id, **kwargs)
finally:
if cleanup:
pool.close()
return result
def apply_func(sfunc, may_fail, wont_fail, loads=None):
loads = loads or _globals.get('loads') or _loads
func = loads(sfunc)
key, queue, get_id, raise_on_exception = loads(wont_fail)
try:
task, data = loads(may_fail)
except Exception as e:
# Need a new reference for the exception, as `e` falls out of scope in
# python 3
exception = e
def serialization_failure():
raise exception
task = (serialization_failure,)
data = {}
return func(key, task, data, queue, get_id,
raise_on_exception=raise_on_exception)
@curry
def pickle_apply_async(apply_async, func, args=(),
func_loads=None, func_dumps=None):
# XXX: To deal with deserialization errors of tasks, this version of
# apply_async doesn't actually match that of `pool.apply_async`. It's
# customized to fit the signature of `dask.async.execute_task`, which is
# the only function ever actually passed as `func`. This is a bit of a
# hack, but it works pretty well. If the signature of `execute_task`
# changes, then this will need to be changed as well.
dumps = func_dumps or _globals.get('func_dumps') or _dumps
key, task, data, queue, get_id, raise_on_exception = args
sfunc = dumps(func)
may_fail = dumps((task, data))
wont_fail = dumps((key, queue, get_id, raise_on_exception))
return apply_async(curry(apply_func, loads=func_loads),
args=[sfunc, may_fail, wont_fail])
| {
"repo_name": "cowlicks/dask",
"path": "dask/multiprocessing.py",
"copies": "2",
"size": "3838",
"license": "bsd-3-clause",
"hash": 2128479518367291600,
"line_mean": 30.2032520325,
"line_max": 78,
"alpha_frac": 0.6354872329,
"autogenerated": false,
"ratio": 3.8303393213572856,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0008804768256280264,
"num_lines": 123
} |
from __future__ import absolute_import, division, print_function
import multiprocessing
import traceback
import pickle
import sys
from .local import get_async # TODO: get better get
from .context import _globals
from .optimize import fuse, cull
import cloudpickle
if sys.version_info.major < 3:
import copy_reg as copyreg
else:
import copyreg
def _reduce_method_descriptor(m):
return getattr, (m.__objclass__, m.__name__)
# type(set.union) is used as a proxy to <class 'method_descriptor'>
copyreg.pickle(type(set.union), _reduce_method_descriptor)
def _dumps(x):
return cloudpickle.dumps(x, protocol=pickle.HIGHEST_PROTOCOL)
_loads = pickle.loads
def _process_get_id():
return multiprocessing.current_process().ident
# -- Remote Exception Handling --
# By default, tracebacks can't be serialized using pickle. However, the
# `tblib` library can enable support for this. Since we don't mandate
# that tblib is installed, we do the following:
#
# - If tblib is installed, use it to serialize the traceback and reraise
# in the scheduler process
# - Otherwise, use a ``RemoteException`` class to contain a serialized
# version of the formatted traceback, which will then print in the
# scheduler process.
#
# To enable testing of the ``RemoteException`` class even when tblib is
# installed, we don't wrap the class in the try block below
class RemoteException(Exception):
""" Remote Exception
Contains the exception and traceback from a remotely run task
"""
def __init__(self, exception, traceback):
self.exception = exception
self.traceback = traceback
def __str__(self):
return (str(self.exception) + "\n\n"
"Traceback\n"
"---------\n" +
self.traceback)
def __dir__(self):
return sorted(set(dir(type(self)) +
list(self.__dict__) +
dir(self.exception)))
def __getattr__(self, key):
try:
return object.__getattribute__(self, key)
except AttributeError:
return getattr(self.exception, key)
exceptions = dict()
def remote_exception(exc, tb):
""" Metaclass that wraps exception type in RemoteException """
if type(exc) in exceptions:
typ = exceptions[type(exc)]
return typ(exc, tb)
else:
try:
typ = type(exc.__class__.__name__,
(RemoteException, type(exc)),
{'exception_type': type(exc)})
exceptions[type(exc)] = typ
return typ(exc, tb)
except TypeError:
return exc
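# Example (sketch): remote_exception(ValueError('boom'), tb_text) returns an instance
# of a dynamically created 'ValueError' class deriving from both RemoteException and
# ValueError, so callers can still catch it with ``except ValueError``.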
try:
import tblib.pickling_support
tblib.pickling_support.install()
from dask.compatibility import reraise
def _pack_traceback(tb):
return tb
except ImportError:
def _pack_traceback(tb):
return ''.join(traceback.format_tb(tb))
def reraise(exc, tb):
exc = remote_exception(exc, tb)
raise exc
def pack_exception(e, dumps):
exc_type, exc_value, exc_traceback = sys.exc_info()
tb = _pack_traceback(exc_traceback)
try:
result = dumps((e, tb))
except BaseException as e:
exc_type, exc_value, exc_traceback = sys.exc_info()
tb = _pack_traceback(exc_traceback)
result = dumps((e, tb))
return result
def get(dsk, keys, num_workers=None, func_loads=None, func_dumps=None,
optimize_graph=True, **kwargs):
""" Multiprocessed get function appropriate for Bags
Parameters
----------
dsk : dict
dask graph
keys : object or list
Desired results from graph
num_workers : int
Number of worker processes (defaults to number of cores)
func_dumps : function
Function to use for function serialization
(defaults to cloudpickle.dumps)
func_loads : function
Function to use for function deserialization
(defaults to cloudpickle.loads)
optimize_graph : bool
If True [default], `fuse` is applied to the graph before computation.
"""
pool = _globals['pool']
if pool is None:
pool = multiprocessing.Pool(num_workers,
initializer=initialize_worker_process)
cleanup = True
else:
cleanup = False
# Optimize Dask
dsk2, dependencies = cull(dsk, keys)
if optimize_graph:
dsk3, dependencies = fuse(dsk2, keys, dependencies)
else:
dsk3 = dsk2
# We specify marshalling functions in order to catch serialization
# errors and report them to the user.
loads = func_loads or _globals.get('func_loads') or _loads
dumps = func_dumps or _globals.get('func_dumps') or _dumps
# Note former versions used a multiprocessing Manager to share
# a Queue between parent and workers, but this is fragile on Windows
# (issue #1652).
try:
# Run
result = get_async(pool.apply_async, len(pool._pool), dsk3, keys,
get_id=_process_get_id, dumps=dumps, loads=loads,
pack_exception=pack_exception,
raise_exception=reraise, **kwargs)
finally:
if cleanup:
pool.close()
return result
def initialize_worker_process():
"""
Initialize a worker process before running any tasks in it.
"""
# If Numpy is already imported, presumably its random state was
# inherited from the parent => re-seed it.
np = sys.modules.get('numpy')
if np is not None:
np.random.seed()
| {
"repo_name": "mraspaud/dask",
"path": "dask/multiprocessing.py",
"copies": "2",
"size": "5571",
"license": "bsd-3-clause",
"hash": -1960134324882675700,
"line_mean": 28.015625,
"line_max": 77,
"alpha_frac": 0.6259199426,
"autogenerated": false,
"ratio": 4.105379513633014,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5731299456233013,
"avg_score": null,
"num_lines": null
} |
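For orientation, a small sketch of how this scheduler is normally driven, assuming the module above is importable as dask.multiprocessing; the __main__ guard matters on platforms that spawn worker processes.

from dask.multiprocessing import get

def inc(x):
    return x + 1

def add(x, y):
    return x + y

# A tiny task graph: z = inc(x) + 10
dsk = {'x': 1,
       'y': (inc, 'x'),
       'z': (add, 'y', 10)}

if __name__ == '__main__':
    print(get(dsk, 'z'))   # 12
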
from __future__ import absolute_import, division, print_function
import multiprocessing
import mock
import pytest
import workflows.frontend
from workflows.services.common_service import CommonService
### Helper classes used in tests ##############################################
class ServiceCrashingOnInit(CommonService):
'''A service that raises an unhandled exception.'''
@staticmethod
def initializing():
'''Raise AssertionError.
This should set the error state, kill the service and cause the frontend
to leave its main loop.'''
assert False # pragma: no cover
class MockPipe(object):
'''An object that behaves like a pipe.'''
def __init__(self, contents, on_empty=None):
'''Load up contents into pipe. Set up an optional callback function.'''
self.contents = contents
self.on_empty = on_empty
@staticmethod
def poll(time=None):
'''Check if pipe is empty. There is always something there, either
some content or an EOFError.'''
return True
def recv(self):
'''Return first item off the list or raise exception.
Call callback function if defined and pipe is emptied.'''
if not self.contents:
raise EOFError('Pipe is empty')
if len(self.contents) == 1 and self.on_empty:
self.on_empty()
return self.contents.pop(0)
@staticmethod
def close():
'''This pipe can't be written to anyway. Ignore call.'''
def assert_empty(self):
'''Pipe must have been read out completely.'''
assert not self.contents
def assert_single_call_only(target):
def wrapper(*args, **kwargs):
if hasattr(wrapper, 'called'):
raise Exception('Only a single call to object %s is allowed' % str(target))
setattr(wrapper, 'called', True)
return target(*args, **kwargs)
return wrapper
###############################################################################
@mock.patch('workflows.frontend.multiprocessing')
@mock.patch('workflows.frontend.workflows.transport')
def test_frontend_connects_to_transport_layer(mock_transport, mock_mp):
'''Frontend should call connect method on transport layer module and subscribe to a unique command queue.'''
workflows.frontend.Frontend()
mock_transport.lookup.assert_called_once_with(None)
mock_transport.lookup.return_value.assert_called_once_with()
mock_transport.lookup.return_value.return_value.connect.assert_called_once_with()
workflows.frontend.Frontend(transport="some_transport")
mock_transport.lookup.assert_called_with("some_transport")
mock_trn = mock.Mock()
workflows.frontend.Frontend(mock_trn)
mock_trn.assert_called_once_with()
mock_trn.return_value.connect.assert_called_once_with()
@mock.patch('workflows.frontend.workflows.transport')
def test_frontend_subscribes_to_command_channel(mock_transport):
'''Frontend should only subscribe to the broadcast command channel when one is configured, and should react to shutdown commands addressed to it.'''
transport = mock_transport.lookup.return_value.return_value
fe = workflows.frontend.Frontend()
mock_transport.lookup.assert_called_once_with(None)
transport.connect.assert_called_once()
transport.subscribe.assert_not_called()
def frontend_with_message(message):
transport.reset_mock()
fe = workflows.frontend.Frontend(transport_command_channel=mock.sentinel.command)
transport.subscribe_broadcast.assert_called_once_with(mock.sentinel.command, mock.ANY)
transport.subscribe_broadcast.call_args[0][1]({}, message)
return fe
assert frontend_with_message({}).shutdown == False
assert frontend_with_message({ 'command': 'shutdown' }).shutdown == False
assert frontend_with_message({ 'command': 'shutdown', 'host': fe.get_host_id() }).shutdown == True
assert frontend_with_message({ 'command': 'shutdown', 'host': mock.sentinel.different_host }).shutdown == False
assert frontend_with_message({ 'command': 'shutdown', 'service': fe.get_status()['serviceclass'] }).shutdown == True
assert frontend_with_message({ 'command': 'shutdown', 'service': mock.sentinel.different_service }).shutdown == False
@mock.patch('workflows.frontend.multiprocessing')
@mock.patch('workflows.frontend.workflows.transport')
def test_start_service_in_frontend(mock_transport, mock_mp):
'''Check that the service is being run and connected to the frontend via the correct pipes.'''
mock_service = mock.Mock()
pipes = [ mock.Mock(), mock.Mock(), mock.Mock(), mock.Mock() ]
mock_mp.Pipe.side_effect = [
(pipes[0], pipes[1]),
(pipes[2], pipes[3]),
None ]
# initialize frontend
fe = workflows.frontend.Frontend(environment=mock.sentinel.environment)
# start service
fe.switch_service(mock_service)
# check service was started properly
mock_service.assert_called_once_with(environment=mock.sentinel.environment)
mock_service.return_value.connect.assert_called_once_with(commands=pipes[0], frontend=pipes[3])
mock_mp.Process.assert_called_once_with(target=mock_service.return_value.start, args=(), kwargs=mock.ANY)
mock_mp.Process.return_value.start.assert_called_once()
# Fun with python multiprocessing:
# Because the pipe file descriptors are copied they must be closed in the process not using them
pipes[0].close.assert_called_once_with()
pipes[3].close.assert_called_once_with()
@mock.patch('workflows.frontend.workflows.transport')
def test_get_frontend_status(mock_transport):
'''Check that the get_status-method works and contains the correct host-id.'''
fe = workflows.frontend.Frontend()
status = fe.get_status()
assert status['host'] == fe.get_host_id()
@pytest.mark.timeout(3)
def test_frontend_can_handle_unhandled_service_initialization_exceptions():
'''When a service crashes on initialization an exception should be thrown.'''
transport = mock.Mock()
fe = workflows.frontend.Frontend(transport=transport,
service=assert_single_call_only(ServiceCrashingOnInit))
transport = transport.return_value
transport.connect.assert_called_once()
with pytest.raises(workflows.Error):
fe.run()
status_list = [ args[0].get('status') for args, kwargs in transport.broadcast_status.call_args_list if args ]
assert status_list == [
CommonService.SERVICE_STATUS_NEW,
CommonService.SERVICE_STATUS_STARTING,
CommonService.SERVICE_STATUS_ERROR,
CommonService.SERVICE_STATUS_END,
]
@mock.patch('workflows.frontend.multiprocessing')
def test_frontend_can_handle_service_initialization_segfaults(mock_mp):
'''When a service crashes on initialization an exception should be thrown.'''
transport = mock.Mock()
service = mock.Mock()
service_process = mock.Mock()
dummy_pipe = mock.Mock()
dummy_pipe.recv.side_effect = EOFError() # Dead on arrival
mock_mp.Pipe.return_value = (dummy_pipe, dummy_pipe)
mock_mp.Process.return_value = service_process
fe = workflows.frontend.Frontend(transport=transport, service=service)
transport = transport.return_value
transport.connect.assert_called_once()
mock_mp.Process.assert_called_once_with(target=service.return_value.start, args=(), kwargs=mock.ANY)
with pytest.raises(workflows.Error):
fe.run()
service_process.start.assert_called()
service_process.join.assert_called()
@mock.patch('workflows.frontend.multiprocessing')
def test_frontend_terminates_on_transport_disconnection(mock_mp):
'''When the transport connection is lost permanently, the frontend should stop.'''
transport = mock.Mock()
service = mock.Mock()
service_process = mock.Mock()
dummy_pipe = mock.Mock()
dummy_pipe.poll.side_effect = [ False, Exception('Frontend did not terminate on transport disconnect') ]
mock_mp.Pipe.return_value = (dummy_pipe, dummy_pipe)
mock_mp.Process.return_value = service_process
service_process.is_alive.return_value = True
fe = workflows.frontend.Frontend(transport=transport, service=service)
transport = transport.return_value
transport.connect.assert_called_once()
transport.is_connected.return_value = False
mock_mp.Process.assert_called_once_with(target=service.return_value.start, args=(), kwargs=mock.ANY)
with pytest.raises(workflows.Error):
fe.run()
service_process.terminate.assert_called()
service_process.join.assert_called()
@mock.patch('workflows.frontend.multiprocessing')
def test_frontend_parses_status_updates(mock_mp):
'''The frontend should forward status updates to the advertiser thread when appropriate.'''
transport = mock.Mock()
service_process = mock.Mock()
mock_mp.Process.return_value = service_process
dummy_pipe = MockPipe([
{'band': 'status_update', 'statuscode': CommonService.SERVICE_STATUS_NEW},
{'band': 'status_update', 'statuscode': CommonService.SERVICE_STATUS_STARTING},
{'band': 'status_update', 'statuscode': CommonService.SERVICE_STATUS_PROCESSING},
{'band': 'status_update', 'statuscode': CommonService.SERVICE_STATUS_SHUTDOWN},
{'band': 'status_update', 'statuscode': CommonService.SERVICE_STATUS_END},
])
mock_mp.Pipe.return_value = (dummy_pipe, dummy_pipe)
fe = workflows.frontend.Frontend(transport=transport, service=mock.Mock())
# intercept status code updates
status_list = []
original_status_update_fn = fe.update_status
def intercept(*args, **kwargs):
if 'status_code' in kwargs:
status_list.append(kwargs['status_code'])
return original_status_update_fn(*args, **kwargs)
fe.update_status = intercept
fe.run()
dummy_pipe.assert_empty()
assert status_list == [CommonService.SERVICE_STATUS_NEW,
CommonService.SERVICE_STATUS_STARTING,
CommonService.SERVICE_STATUS_PROCESSING,
CommonService.SERVICE_STATUS_SHUTDOWN,
CommonService.SERVICE_STATUS_END, # Following updates caused by frontend
CommonService.SERVICE_STATUS_END,
CommonService.SERVICE_STATUS_TEARDOWN]
@mock.patch('workflows.frontend.time')
@mock.patch('workflows.frontend.multiprocessing')
def test_frontend_sends_status_updates(mock_mp, mock_time):
'''The frontend should send status updates on update_status() calls.
Some sensible rate limiting must be applied.'''
transport = mock.Mock()
mock_time.time.return_value = 10
fe = workflows.frontend.Frontend(transport=transport)
transport = transport.return_value
transport.broadcast_status.assert_called_once()
assert transport.broadcast_status.call_args[0][0]['status'] == \
CommonService.SERVICE_STATUS_NONE
transport.broadcast_status.reset_mock()
fe.update_status(status_code=CommonService.SERVICE_STATUS_STARTING)
transport.broadcast_status.assert_called_once()
assert transport.broadcast_status.call_args[0][0]['status'] == \
CommonService.SERVICE_STATUS_STARTING
transport.broadcast_status.reset_mock()
# subsequent update call that does not change anything should be ignored
fe.update_status()
transport.broadcast_status.assert_not_called()
# time passes. Update call should cause broadcast.
mock_time.time.return_value = 20
fe.update_status()
transport.broadcast_status.assert_called_once()
assert transport.broadcast_status.call_args[0][0]['status'] == \
CommonService.SERVICE_STATUS_STARTING
transport.broadcast_status.reset_mock()
# not much time passes. Update call should still cause broadcast, because status has not been seen before
mock_time.time.return_value = 20.1
fe.update_status(status_code=CommonService.SERVICE_STATUS_PROCESSING)
transport.broadcast_status.assert_called_once()
assert transport.broadcast_status.call_args[0][0]['status'] == \
CommonService.SERVICE_STATUS_PROCESSING
transport.broadcast_status.reset_mock()
# not much time passes. Update call should not broadcast, because this is a recent IDLE status
mock_time.time.return_value = 20.2
fe.update_status(status_code=CommonService.SERVICE_STATUS_IDLE)
transport.broadcast_status.assert_not_called()
# not much time passes. Update call should not broadcast, because the announced status has not changed
mock_time.time.return_value = 20.3
fe.update_status(status_code=CommonService.SERVICE_STATUS_PROCESSING)
transport.broadcast_status.assert_not_called()
transport.broadcast_status.reset_mock()
# not much time passes. Update call should still not broadcast, because this is again a recent IDLE status
mock_time.time.return_value = 20.4
fe.update_status(status_code=CommonService.SERVICE_STATUS_IDLE)
transport.broadcast_status.assert_not_called()
# not much time passes. Update call should still not broadcast, because this IDLE is still too recent
mock_time.time.return_value = 20.8
fe.update_status(status_code=CommonService.SERVICE_STATUS_IDLE)
transport.broadcast_status.assert_not_called()
# however as time passes the update call should cause a late broadcast
mock_time.time.return_value = 21
fe.update_status()
transport.broadcast_status.assert_called_once()
assert transport.broadcast_status.call_args[0][0]['status'] == \
CommonService.SERVICE_STATUS_IDLE
@mock.patch('workflows.frontend.multiprocessing')
def test_frontend_does_not_restart_nonrestartable_service_on_segfault(mock_mp):
'''When the frontend is constructed with restart_service=False failing services must not be restarted.'''
service_factory = mock.Mock()
service_process = mock.Mock()
dummy_pipe = mock.Mock()
dummy_pipe.recv.side_effect = EOFError() # Dead on arrival
mock_mp.Pipe.return_value = (dummy_pipe, dummy_pipe)
mock_mp.Process.return_value = service_process
service_instances = [ mock.Mock() ]
service_factory.side_effect = service_instances + [ Exception('More than one service object instantiated') ]
fe = workflows.frontend.Frontend(transport=mock.Mock(), service=service_factory, restart_service=False)
with pytest.raises(workflows.Error):
fe.run()
service_factory.assert_called_once()
service_process.start.assert_called_once()
service_process.join.assert_called_once()
mock_mp.Process.assert_called_once_with(target=service_instances[0].start, args=(), kwargs=mock.ANY)
@mock.patch('workflows.frontend.multiprocessing')
def test_frontend_does_not_restart_nonrestartable_service_on_error(mock_mp):
'''When the frontend is constructed with restart_service=False failing services must not be restarted.'''
transport = mock.Mock()
service_instances = [ mock.Mock() ]
service_factory = mock.Mock()
service_factory.side_effect = service_instances + [ Exception('More than one service object instantiated') ]
service_process = mock.Mock()
mock_mp.Process.return_value = service_process
dummy_pipe = MockPipe([
{'band': 'status_update', 'statuscode': CommonService.SERVICE_STATUS_NEW},
{'band': 'status_update', 'statuscode': CommonService.SERVICE_STATUS_STARTING},
{'band': 'status_update', 'statuscode': CommonService.SERVICE_STATUS_PROCESSING},
{'band': 'status_update', 'statuscode': CommonService.SERVICE_STATUS_ERROR},
])
mock_mp.Pipe.return_value = (dummy_pipe, dummy_pipe)
fe = workflows.frontend.Frontend(transport=mock.Mock(), service=service_factory, restart_service=False)
with pytest.raises(workflows.Error):
fe.run()
service_factory.assert_called_once()
service_process.start.assert_called_once()
service_process.join.assert_called_once()
@mock.patch('workflows.frontend.multiprocessing')
def test_frontend_does_restart_restartable_service_on_segfault(mock_mp):
'''When the frontend is constructed with restart_service=True failing services must be restarted.'''
transport_factory = mock.Mock()
transport1 = mock.Mock()
transport2 = mock.Mock()
transport3 = mock.Mock()
transport1.return_value.connect.return_value = True
transport2.return_value.connect.return_value = True
transport3.return_value.connect.return_value = True
transport_factory.side_effect = [ transport1, transport2, transport3, None ]
service_factory = mock.Mock()
service_process = mock.Mock()
dummy_pipe = mock.Mock()
dummy_pipe.recv.side_effect = EOFError() # Dead on arrival
mock_mp.Pipe.return_value = (dummy_pipe, dummy_pipe)
mock_mp.Process.return_value = service_process
sentinel_exception = Exception('break loop')
service_instances = [ mock.Mock(), mock.Mock() ]
service_factory.side_effect = service_instances + [ sentinel_exception ]
fe = workflows.frontend.Frontend(transport=transport_factory, service=service_factory, restart_service=True)
with pytest.raises(Exception) as e:
fe.run()
assert e.value == sentinel_exception
assert service_factory.call_count == 3
assert service_process.start.call_count == 2
assert service_process.join.call_count == 2
mock_mp.Process.assert_has_calls( [ mock.call(args=(), kwargs=mock.ANY, target=service_instances[0].start),
mock.call(args=(), kwargs=mock.ANY, target=service_instances[1].start) ], any_order=True )
assert service_instances[0].transport == transport2
assert service_instances[1].transport == transport3
assert transport1.connect.call_count == 1
assert transport2.connect.call_count == 0
assert transport3.connect.call_count == 0
@mock.patch('workflows.frontend.multiprocessing')
def test_frontend_does_restart_restartable_service_on_error(mock_mp):
'''When the frontend is constructed with restart_service=True failing services must be restarted.'''
transport = mock.Mock()
sentinel_exception = Exception('break loop')
service_instances = [ mock.Mock(), mock.Mock() ]
service_factory = mock.Mock()
service_factory.side_effect = service_instances + [ sentinel_exception ]
service_process = mock.Mock()
mock_mp.Process.return_value = service_process
def pipe_creator(*args, **kwargs):
'''Pipe creator creates pipes.'''
dummy_pipe = MockPipe([
{'band': 'status_update', 'statuscode': CommonService.SERVICE_STATUS_NEW},
{'band': 'status_update', 'statuscode': CommonService.SERVICE_STATUS_STARTING},
{'band': 'status_update', 'statuscode': CommonService.SERVICE_STATUS_PROCESSING},
{'band': 'status_update', 'statuscode': CommonService.SERVICE_STATUS_ERROR},
])
return (dummy_pipe, dummy_pipe)
mock_mp.Pipe.side_effect = pipe_creator
fe = workflows.frontend.Frontend(transport=mock.Mock(), service=service_factory, restart_service=True)
try:
fe.run()
assert False, "Exception should have been raised"
except Exception as e:
if e != sentinel_exception:
raise
assert service_factory.call_count == 3
assert service_process.start.call_count == 2
assert service_process.join.call_count == 2
mock_mp.Process.assert_has_calls( [ mock.call(args=(), kwargs=mock.ANY, target=service_instances[0].start),
mock.call(args=(), kwargs=mock.ANY, target=service_instances[1].start) ], any_order=True )
| {
"repo_name": "xia2/workflows",
"path": "workflows/frontend/test_frontend.py",
"copies": "1",
"size": "18872",
"license": "bsd-3-clause",
"hash": 2740475426110459000,
"line_mean": 42.7865429234,
"line_max": 128,
"alpha_frac": 0.725148368,
"autogenerated": false,
"ratio": 3.8017727639000807,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9936930551209847,
"avg_score": 0.01799811613804664,
"num_lines": 431
} |
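A brief sketch of the MockPipe helper above in isolation; the import path is illustrative and assumes the test module is importable as listed in the metadata.

from workflows.frontend.test_frontend import MockPipe

drained = []
pipe = MockPipe(['first', 'second'], on_empty=lambda: drained.append(True))

assert pipe.poll() is True        # poll() always reports something waiting
assert pipe.recv() == 'first'
assert pipe.recv() == 'second'    # on_empty fires when the last item is read
assert drained == [True]

try:
    pipe.recv()                   # further reads raise EOFError
except EOFError:
    pass
pipe.assert_empty()
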
from __future__ import absolute_import, division, print_function
import mxnet as mx
import mxnet.ndarray as nd
import numpy
import copy
from utils import *
class ReplayMemory(object):
def __init__(self, history_length, memory_size=1000000, replay_start_size=100,
state_dim=(), action_dim=(), state_dtype='uint8', action_dtype='uint8',
ctx=mx.gpu()):
self.rng = get_numpy_rng()
self.ctx = ctx
assert type(action_dim) is tuple and type(state_dim) is tuple, \
"Must set the dimensions of state and action for replay memory"
self.state_dim = state_dim
if action_dim == (1,):
self.action_dim = ()
else:
self.action_dim = action_dim
self.states = numpy.zeros((memory_size,) + state_dim, dtype=state_dtype)
self.actions = numpy.zeros((memory_size,) + action_dim, dtype=action_dtype)
self.rewards = numpy.zeros(memory_size, dtype='float32')
self.terminate_flags = numpy.zeros(memory_size, dtype='bool')
self.memory_size = memory_size
self.replay_start_size = replay_start_size
self.history_length = history_length
self.top = 0
self.size = 0
def latest_slice(self):
if self.size >= self.history_length:
return self.states.take(numpy.arange(self.top - self.history_length, self.top),
axis=0, mode="wrap")
else:
assert False, "We can only slice from the replay memory if the " \
"replay size is larger than the length of frames we want to take" \
"as the input."
@property
def sample_enabled(self):
return self.size > self.replay_start_size
def clear(self):
"""
Clear all contents in the replay memory
"""
self.states[:] = 0
self.actions[:] = 0
self.rewards[:] = 0
self.terminate_flags[:] = 0
self.top = 0
self.size = 0
def reset(self):
"""
Reset all the flags stored in the replay memory.
It does not clear the stored contents and is a lightweight version of clear()
"""
self.top = 0
self.size = 0
def copy(self):
# TODO Test the copy function
replay_memory = copy.copy(self)
replay_memory.states = numpy.zeros(self.states.shape, dtype=self.states.dtype)
replay_memory.actions = numpy.zeros(self.actions.shape, dtype=self.actions.dtype)
replay_memory.rewards = numpy.zeros(self.rewards.shape, dtype='float32')
replay_memory.terminate_flags = numpy.zeros(self.terminate_flags.shape, dtype='bool')
replay_memory.states[numpy.arange(self.top-self.size, self.top), ::] = \
self.states[numpy.arange(self.top-self.size, self.top)]
replay_memory.actions[numpy.arange(self.top-self.size, self.top)] = \
self.actions[numpy.arange(self.top-self.size, self.top)]
replay_memory.rewards[numpy.arange(self.top-self.size, self.top)] = \
self.rewards[numpy.arange(self.top-self.size, self.top)]
replay_memory.terminate_flags[numpy.arange(self.top-self.size, self.top)] = \
self.terminate_flags[numpy.arange(self.top-self.size, self.top)]
return replay_memory
def append(self, obs, action, reward, terminate_flag):
self.states[self.top] = obs
self.actions[self.top] = action
self.rewards[self.top] = reward
self.terminate_flags[self.top] = terminate_flag
self.top = (self.top + 1) % self.memory_size
if self.size < self.memory_size:
self.size += 1
def sample_last(self, batch_size, states, offset):
assert self.size >= batch_size and self.replay_start_size >= self.history_length
assert(0 <= self.size <= self.memory_size)
assert(0 <= self.top <= self.memory_size)
if self.size <= self.replay_start_size:
raise ValueError("Size of the effective samples of the ReplayMemory must be "
"bigger than start_size! Currently, size=%d, start_size=%d"
%(self.size, self.replay_start_size))
actions = numpy.empty((batch_size,) + self.action_dim, dtype=self.actions.dtype)
rewards = numpy.empty(batch_size, dtype='float32')
terminate_flags = numpy.empty(batch_size, dtype='bool')
counter = 0
first_index = self.top - self.history_length - 1
while counter < batch_size:
full_indices = numpy.arange(first_index, first_index + self.history_length+1)
end_index = first_index + self.history_length
if numpy.any(self.terminate_flags.take(full_indices[0:self.history_length], mode='wrap')):
# Check if terminates in the middle of the sample!
first_index -= 1
continue
states[counter + offset] = self.states.take(full_indices, axis=0, mode='wrap')
actions[counter] = self.actions.take(end_index, axis=0, mode='wrap')
rewards[counter] = self.rewards.take(end_index, mode='wrap')
terminate_flags[counter] = self.terminate_flags.take(end_index, mode='wrap')
counter += 1
first_index -= 1
return actions, rewards, terminate_flags
def sample_mix(self, batch_size, states, offset, current_index):
assert self.size >= batch_size and self.replay_start_size >= self.history_length
assert(0 <= self.size <= self.memory_size)
assert(0 <= self.top <= self.memory_size)
if self.size <= self.replay_start_size:
raise ValueError("Size of the effective samples of the ReplayMemory must be bigger than "
"start_size! Currently, size=%d, start_size=%d"
%(self.size, self.replay_start_size))
actions = numpy.empty((batch_size,) + self.action_dim, dtype=self.actions.dtype)
rewards = numpy.empty(batch_size, dtype='float32')
terminate_flags = numpy.empty(batch_size, dtype='bool')
counter = 0
first_index = self.top - self.history_length + current_index
thisid = first_index
while counter < batch_size:
full_indices = numpy.arange(thisid, thisid + self.history_length+1)
end_index = thisid + self.history_length
if numpy.any(self.terminate_flags.take(full_indices[0:self.history_length], mode='wrap')):
# Check if terminates in the middle of the sample!
thisid -= 1
continue
states[counter+offset] = self.states.take(full_indices, axis=0, mode='wrap')
actions[counter] = self.actions.take(end_index, axis=0, mode='wrap')
rewards[counter] = self.rewards.take(end_index, mode='wrap')
terminate_flags[counter] = self.terminate_flags.take(end_index, mode='wrap')
counter += 1
thisid = self.rng.randint(low=self.top - self.size, high=self.top - self.history_length-1)
return actions, rewards, terminate_flags
def sample_inplace(self, batch_size, states, offset):
assert self.size >= batch_size and self.replay_start_size >= self.history_length
assert(0 <= self.size <= self.memory_size)
assert(0 <= self.top <= self.memory_size)
if self.size <= self.replay_start_size:
raise ValueError("Size of the effective samples of the ReplayMemory must be "
"bigger than start_size! Currently, size=%d, start_size=%d"
%(self.size, self.replay_start_size))
actions = numpy.zeros((batch_size,) + self.action_dim, dtype=self.actions.dtype)
rewards = numpy.zeros(batch_size, dtype='float32')
terminate_flags = numpy.zeros(batch_size, dtype='bool')
counter = 0
while counter < batch_size:
index = self.rng.randint(low=self.top - self.size + 1, high=self.top - self.history_length)
transition_indices = numpy.arange(index, index + self.history_length+1)
initial_indices = transition_indices - 1
end_index = index + self.history_length - 1
if numpy.any(self.terminate_flags.take(initial_indices[0:self.history_length], mode='wrap')):
# Check if terminates in the middle of the sample!
continue
states[counter + offset] = self.states.take(initial_indices, axis=0, mode='wrap')
actions[counter] = self.actions.take(end_index, axis=0, mode='wrap')
rewards[counter] = self.rewards.take(end_index, mode='wrap')
terminate_flags[counter] = self.terminate_flags.take(end_index, mode='wrap')
# next_states[counter] = self.states.take(transition_indices, axis=0, mode='wrap')
counter += 1
return actions, rewards, terminate_flags
def sample(self, batch_size):
assert self.size >= batch_size and self.replay_start_size >= self.history_length
assert(0 <= self.size <= self.memory_size)
assert(0 <= self.top <= self.memory_size)
if self.size <= self.replay_start_size:
raise ValueError("Size of the effective samples of the ReplayMemory must be bigger than "
"start_size! Currently, size=%d, start_size=%d"
%(self.size, self.replay_start_size))
#TODO Possibly states + inds for less memory access
states = numpy.zeros((batch_size, self.history_length) + self.state_dim,
dtype=self.states.dtype)
actions = numpy.zeros((batch_size,) + self.action_dim, dtype=self.actions.dtype)
rewards = numpy.zeros(batch_size, dtype='float32')
terminate_flags = numpy.zeros(batch_size, dtype='bool')
next_states = numpy.zeros((batch_size, self.history_length) + self.state_dim,
dtype=self.states.dtype)
counter = 0
while counter < batch_size:
index = self.rng.randint(low=self.top - self.size + 1, high=self.top - self.history_length)
transition_indices = numpy.arange(index, index + self.history_length)
initial_indices = transition_indices - 1
end_index = index + self.history_length - 1
while numpy.any(self.terminate_flags.take(initial_indices, mode='wrap')):
# Check if terminates in the middle of the sample!
index -= 1
transition_indices = numpy.arange(index, index + self.history_length)
initial_indices = transition_indices - 1
end_index = index + self.history_length - 1
states[counter] = self.states.take(initial_indices, axis=0, mode='wrap')
actions[counter] = self.actions.take(end_index, axis=0, mode='wrap')
rewards[counter] = self.rewards.take(end_index, mode='wrap')
terminate_flags[counter] = self.terminate_flags.take(end_index, mode='wrap')
next_states[counter] = self.states.take(transition_indices, axis=0, mode='wrap')
counter += 1
return states, actions, rewards, next_states, terminate_flags
| {
"repo_name": "danithaca/mxnet",
"path": "example/reinforcement-learning/dqn/replay_memory.py",
"copies": "16",
"size": "11299",
"license": "apache-2.0",
"hash": 7942620694802313000,
"line_mean": 52.5497630332,
"line_max": 105,
"alpha_frac": 0.6053633065,
"autogenerated": false,
"ratio": 3.8602664844550736,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
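A rough usage sketch of the replay memory above; the import path is illustrative, utils.get_numpy_rng is assumed to be available alongside the module, and ctx is set to the CPU so no GPU is required.

import numpy
import mxnet as mx
from replay_memory import ReplayMemory   # illustrative import path

mem = ReplayMemory(history_length=4, memory_size=1000, replay_start_size=100,
                   state_dim=(84, 84), action_dim=(1,), ctx=mx.cpu())

# Fill with dummy transitions until sampling becomes legal
for _ in range(300):
    obs = numpy.zeros((84, 84), dtype='uint8')
    mem.append(obs, action=0, reward=0.0, terminate_flag=False)

assert mem.sample_enabled
states, actions, rewards, next_states, terminals = mem.sample(batch_size=32)
print(states.shape)   # (32, 4, 84, 84)
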
from __future__ import absolute_import, division, print_function
import mxnet as mx
import mxnet.ndarray as nd
import numpy
import os
import pickle
from collections import OrderedDict
import logging
from utils import *
logger = logging.getLogger(__name__)
class Base(object):
"""Basic wrapper for the symbols
Parameters
----------
data_shapes : dict
The shapes of tensor variables
sym_gen : mx.sym.Symbol
Symbol of the network
params : None or dict, optional
params_grad : None or dict, optional
aux_states:
initializer:
ctx:
name:
"""
def __init__(self, data_shapes, sym_gen, params=None, aux_states=None,
default_bucket_kwargs=None, learn_init_keys=None,
initializer=mx.init.Xavier(factor_type="in", rnd_type="gaussian", magnitude=2),
ctx=mx.gpu(), name='Net'):
self.sym_gen = sym_gen
bucket_kwargs = default_bucket_kwargs.copy() if \
default_bucket_kwargs is not None else dict()
self.curr_bucket_key = None
self.ctx = ctx
self.name = name
self.initializer = initializer
if params is None:
self.params = None
self.params_grad = None
else:
self.params = OrderedDict([(k, v.copyto(ctx)) for k, v in params.items()])
self.params_grad = OrderedDict([(n, nd.empty(v.shape, ctx=ctx))
for n, v in self.params.items()])
if aux_states is not None:
self.aux_states = OrderedDict([(k, v.copyto(ctx)) for k, v in aux_states.items()])
else:
self.aux_states = None
self._buckets = dict()
self.learn_init_keys = learn_init_keys if learn_init_keys is not None else []
self.learn_init_key_shapes = {k: data_shapes[k] for k in self.learn_init_keys}
self.switch_bucket(bucket_kwargs=bucket_kwargs, data_shapes=data_shapes)
self.acc_grad = None
@property
def exe(self):
"""Get the current executor
Returns
-------
exe : mxnet.executor.Executor
"""
return self._buckets[self.curr_bucket_key]['exe'][tuple(self.data_shapes.items())]
@property
def data_shapes(self):
return self._buckets[self.curr_bucket_key]['data_shapes']
@property
def sym(self):
return self._buckets[self.curr_bucket_key]['sym']
def switch_bucket(self, bucket_kwargs=None, data_shapes=None):
if bucket_kwargs is not None:
self.curr_bucket_key = get_bucket_key(bucket_kwargs=bucket_kwargs)
# 1. Check if bucket key exists
if self.curr_bucket_key in self._buckets:
if data_shapes is not None:
if tuple(data_shapes.items()) not in self._buckets[self.curr_bucket_key]['exe']:
#TODO Optimize the reshaping functionality!
self._buckets[self.curr_bucket_key]['exe'][tuple(data_shapes.items())] = \
self.exe.reshape(partial_shaping=True, allow_up_sizing=True, **data_shapes)
self._buckets[self.curr_bucket_key]['data_shapes'] = data_shapes
else:
self._buckets[self.curr_bucket_key]['data_shapes'] = data_shapes
return
# 2. If the bucket key does not exist, create new symbol + executor
assert data_shapes is not None, "Must set data_shapes for new bucket!"
if isinstance(self.sym_gen, mx.symbol.Symbol):
sym = self.sym_gen
else:
sym = self.sym_gen(**dict(self.curr_bucket_key))
arg_names = sym.list_arguments()
aux_names = sym.list_auxiliary_states()
param_names = [n for n in arg_names
if n in self.learn_init_keys or (n not in data_shapes.keys())]
for k, v in data_shapes.items():
assert isinstance(v, tuple), "Data_shapes must be tuple! Find k=%s, v=%s, " \
"data_shapes=%s" % (k, str(v), str(data_shapes))
arg_shapes, _, aux_shapes = sym.infer_shape(**data_shapes)
arg_name_shape = OrderedDict([(k, s) for k, s in zip(arg_names, arg_shapes)])
if self.params is None:
self.params = OrderedDict([(n, nd.empty(arg_name_shape[n], ctx=self.ctx))
for n in param_names])
self.params_grad = OrderedDict([(n, nd.empty(arg_name_shape[n], ctx=self.ctx))
for n in param_names])
if len(self.params) > 0:
assert self.initializer is not None, \
'We must set the initializer if we do not manually initialize ' \
'the free parameters of the network!'
for k, v in self.params.items():
self.initializer(k, v)
else:
assert set(arg_name_shape.items()) == \
set(list(data_shapes.items()) + [(k, v.shape) for k, v in self.params.items()])
if self.aux_states is None:
self.aux_states = OrderedDict([(k, nd.empty(s, ctx=self.ctx))
for k, s in zip(aux_names, aux_shapes)])
data_inputs = {k: mx.nd.empty(data_shapes[k], ctx=self.ctx)
for k in set(data_shapes.keys()) - set(self.learn_init_keys)}
if len(self._buckets) > 0:
shared_exe = list(list(self._buckets.values())[0]['exe'].values())[0]
else:
shared_exe = None
self._buckets[self.curr_bucket_key] = {
'exe': {tuple(data_shapes.items()):
sym.bind(ctx=self.ctx,
args=dict(self.params, **data_inputs),
args_grad=dict(self.params_grad.items()),
aux_states=self.aux_states,
shared_exec=shared_exe)
},
'data_shapes': data_shapes,
'sym': sym
}
def save_params(self, dir_path="", epoch=None):
param_saving_path = save_params(dir_path=dir_path, name=self.name, epoch=epoch,
params=self.params,
aux_states=self.aux_states)
misc_saving_path = save_misc(dir_path=dir_path, epoch=epoch, name=self.name,
content={'data_shapes': {k: map(int, v) for k, v in self.data_shapes.items()}})
logging.info('Saving %s, params: \"%s\", misc: \"%s\"',
self.name, param_saving_path, misc_saving_path)
def load_params(self, name="", dir_path="", epoch=None):
params, aux_states, param_loading_path = load_params(dir_path=dir_path, epoch=epoch, name=name)
logging.info('Loading params from \"%s\" to %s' % (param_loading_path, self.name))
for k, v in params.items():
if k in self.params:
logging.debug(' Loading %s %s' %(k, str(v.shape)))
self.params[k][:] = v
else:
logging.warn("Found unused param in the saved model file: %s" % k)
for k, v in aux_states.items():
self.aux_states[k][:] = v
@property
def internal_sym_names(self):
return self.sym.get_internals().list_outputs()
@property
def output_keys(self):
return self.sym.list_outputs()
def compute_internal(self, sym_name, bucket_kwargs=None, **arg_dict):
"""
View an internal symbol of the network by binding it and running a forward pass.
:param sym_name: name of the internal symbol to evaluate
:param bucket_kwargs: optional bucket key arguments passed to switch_bucket()
:param arg_dict: input arrays keyed by argument name
:return: the output NDArray of the internal symbol
"""
data_shapes = {k: v.shape for k, v in arg_dict.items()}
self.switch_bucket(bucket_kwargs=bucket_kwargs,
data_shapes=data_shapes)
internal_sym = self.sym.get_internals()[sym_name]
data_inputs = {k: mx.nd.empty(v, ctx=self.ctx)
for k, v in self.data_shapes.items()
if k in internal_sym.list_arguments()}
params = {k: v for k, v in self.params.items() if
k in internal_sym.list_arguments()}
aux_states = {k: v for k, v in self.aux_states.items()
if k in internal_sym.list_auxiliary_states()}
exe = internal_sym.bind(ctx=self.ctx,
args=dict(params, **data_inputs),
args_grad=None,
grad_req='null',
aux_states=aux_states,
shared_exec=self.exe)
for k, v in arg_dict.items():
exe.arg_dict[k][:] = v
exe.forward(is_train=False)
assert 1 == len(exe.outputs)
for output in exe.outputs:
output.wait_to_read()
return exe.outputs[0]
def forward(self, is_train=False, bucket_kwargs=None, **arg_dict):
#import time
#start = time.time()
data_shapes = {k: v.shape for k, v in arg_dict.items()}
for name in self.learn_init_keys:
data_shapes[name] = self.learn_init_key_shapes[name]
self.switch_bucket(bucket_kwargs=bucket_kwargs,
data_shapes=data_shapes)
#end = time.time()
#print 'Swith Bucket:', end - start
#start = time.time()
for k, v in arg_dict.items():
assert self.exe.arg_dict[k].shape == v.shape,\
"Shape not match: key %s, need %s, received %s" \
%(k, str(self.exe.arg_dict[k].shape), str(v.shape))
self.exe.arg_dict[k][:] = v
self.exe.forward(is_train=is_train)
for output in self.exe.outputs:
output.wait_to_read()
#end = time.time()
#print 'Forward:', end - start
return self.exe.outputs
def backward(self, out_grads=None, **arg_dict):
for k, v in arg_dict.items():
assert self.exe.arg_dict[k].shape == v.shape, \
"Shape not match: key %s, need %s, received %s" \
% (k, str(self.exe.arg_dict[k].shape), str(v.shape))
self.exe.arg_dict[k][:] = v
self.exe.backward(out_grads=out_grads)
def forward_backward(self, bucket_kwargs=None, out_grads=None, **arg_dict):
data_shapes = {k: v.shape for k, v in arg_dict.items()}
for name in self.learn_init_keys:
data_shapes[name] = self.learn_init_key_shapes[name]
self.switch_bucket(bucket_kwargs=bucket_kwargs,
data_shapes=data_shapes)
for k, v in arg_dict.items():
self.exe.arg_dict[k][:] = v
self.exe.forward(is_train=True)
self.exe.backward(out_grads=out_grads)
for output in self.exe.outputs:
output.wait_to_read()
return self.exe.outputs
def update(self, updater, params_grad=None):
if params_grad is None:
params_grad = self.params_grad
assert type(params_grad) is OrderedDict
for ind, k in enumerate(self.params.keys()):
updater(index=ind, grad=params_grad[k], weight=self.params[k])
def update_acc_grad(self):
if self.acc_grad is None:
self.acc_grad = OrderedDict([(n, nd.zeros(v.shape, ctx=self.ctx))
for n, v in self.params_grad.items()])
for k, v in self.acc_grad.items():
v[:] = v + self.params_grad[k]
def reset_acc_grad(self):
for v in self.acc_grad.values():
v[:] = 0
def copy(self, name=None, ctx=None):
if ctx is None:
ctx = self.ctx
if name is None:
name = self.name + '-copy-' + str(ctx)
return Base(data_shapes=self.data_shapes,
sym_gen=self.sym_gen,
default_bucket_kwargs=dict(self.curr_bucket_key),
params=self.params,
aux_states=self.aux_states, ctx=ctx, name=name)
def copy_params_to(self, dst):
for k, v in self.params.items():
dst.params[k][:] = v
# TODO `wait_to_read()` here seems unnecessary, remove it in the future!
dst.params[k].wait_to_read()
@property
def total_param_num(self):
return sum(v.size for v in self.params.values())
def print_stat(self):
logging.info("Name: %s" % self.name)
assert self.params is not None, "Fatal Error!"
logging.info("Params: ")
for k, v in self.params.items():
logging.info(" %s: %s" % (k, v.shape))
if self.aux_states is None or 0 == len(self.aux_states):
logging.info("Aux States: None")
else:
logging.info("Aux States: " + ' '.join(
["%s:%s" % (str(k), str(v.shape)) for k, v in self.aux_states.items()]))
logging.info("Total Parameter Num: " + str(self.total_param_num))
| {
"repo_name": "coder-james/mxnet",
"path": "example/reinforcement-learning/dqn/base.py",
"copies": "15",
"size": "12988",
"license": "apache-2.0",
"hash": -8084616376120728000,
"line_mean": 42.1495016611,
"line_max": 116,
"alpha_frac": 0.5407299045,
"autogenerated": false,
"ratio": 3.7559282822440716,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0020566216219524724,
"num_lines": 301
} |
from __future__ import absolute_import, division, print_function
import networkx as nx
from dask.core import istask, get_dependencies
from toolz import first
def make_hashable(x):
try:
hash(x)
return x
except TypeError:
return hash(str(x))
def lower(func):
while hasattr(func, 'func'):
func = func.func
return func
def name(func):
try:
return lower(func).__name__
except AttributeError:
return 'func'
def to_networkx(d, data_attributes=None, function_attributes=None):
if data_attributes is None:
data_attributes = dict()
if function_attributes is None:
function_attributes = dict()
g = nx.DiGraph()
for k, v in sorted(d.items(), key=first):
g.add_node(k, shape='box', **data_attributes.get(k, dict()))
if istask(v):
func, args = v[0], v[1:]
func_node = make_hashable((v, 'function'))
g.add_node(func_node,
shape='circle',
label=name(func),
**function_attributes.get(k, dict()))
g.add_edge(k, func_node)
for dep in sorted(get_dependencies(d, k)):
arg2 = make_hashable(dep)
g.add_node(arg2,
label=str(dep),
shape='box',
**data_attributes.get(dep, dict()))
g.add_edge(func_node, arg2)
else:
g.add_node(k, label='%s=%s' % (k, v), **data_attributes.get(k, dict()))
return g
def write_networkx_to_dot(dg, filename='mydask'):
import os
p = nx.to_pydot(dg)
with open(filename + '.dot', 'w') as f:
f.write(p.to_string())
os.system('dot -Tpdf %s.dot -o %s.pdf' % (filename, filename))
os.system('dot -Tpng %s.dot -o %s.png' % (filename, filename))
print("Writing graph to %s.pdf" % filename)
def dot_graph(d, filename='mydask', **kwargs):
dg = to_networkx(d, **kwargs)
write_networkx_to_dot(dg, filename=filename)
if __name__ == '__main__':
def add(x, y):
return x + y
def inc(x):
return x + 1
dsk = {'x': 1, 'y': (inc, 'x'),
'a': 2, 'b': (inc, 'a'),
'z': (add, 'y', 'b')}
dot_graph(dsk)
| {
"repo_name": "PeterDSteinberg/dask",
"path": "dask/dot.py",
"copies": "1",
"size": "2295",
"license": "bsd-3-clause",
"hash": -7085710115613450000,
"line_mean": 26,
"line_max": 83,
"alpha_frac": 0.522875817,
"autogenerated": false,
"ratio": 3.375,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4397875817,
"avg_score": null,
"num_lines": null
} |
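The __main__ block above exercises dot_graph but not the attribute hooks; here is a short sketch of those, assuming this file is importable as dask.dot and the older networkx API it relies on is installed.

from dask.dot import to_networkx

def inc(x):
    return x + 1

dsk = {'x': 1, 'y': (inc, 'x')}

# Per-key styling dictionaries are forwarded straight to the graph nodes.
g = to_networkx(dsk,
                data_attributes={'y': {'color': 'red'}},
                function_attributes={'y': {'color': 'blue'}})
print(g.number_of_nodes(), g.number_of_edges())   # 3 2
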
from __future__ import absolute_import, division, print_function
import networkx as nx
from dask.core import istask, get_dependencies
def make_hashable(x):
try:
hash(x)
return x
except TypeError:
return hash(str(x))
def lower(func):
while hasattr(func, 'func'):
func = func.func
return func
def name(func):
try:
return lower(func).__name__
except AttributeError:
return 'func'
def to_networkx(d, data_attributes=None, function_attributes=None):
if data_attributes is None:
data_attributes = dict()
if function_attributes is None:
function_attributes = dict()
g = nx.DiGraph()
for k, v in sorted(d.items(), key=lambda x: x[0]):
g.add_node(k, shape='box', **data_attributes.get(k, dict()))
if istask(v):
func, args = v[0], v[1:]
func_node = make_hashable((v, 'function'))
g.add_node(func_node,
shape='circle',
label=name(func),
**function_attributes.get(k, dict()))
g.add_edge(func_node, k)
for dep in sorted(get_dependencies(d, k)):
arg2 = make_hashable(dep)
g.add_node(arg2,
label=str(dep),
shape='box',
**data_attributes.get(dep, dict()))
g.add_edge(arg2, func_node)
else:
if v not in d:
g.add_node(k, label='%s=%s' % (k, v), **data_attributes.get(k, dict()))
else: # alias situation
g.add_edge(v, k)
return g
def write_networkx_to_dot(dg, filename='mydask'):
import os
try:
p = nx.to_pydot(dg)
except AttributeError:
raise ImportError("Can not find pydot module. Please install.\n"
" pip install pydot")
p.set_rankdir('BT')
with open(filename + '.dot', 'w') as f:
f.write(p.to_string())
os.system('dot -Tpdf %s.dot -o %s.pdf' % (filename, filename))
os.system('dot -Tpng %s.dot -o %s.png' % (filename, filename))
print("Writing graph to %s.pdf" % filename)
def dot_graph(d, filename='mydask', **kwargs):
dg = to_networkx(d, **kwargs)
write_networkx_to_dot(dg, filename=filename)
if __name__ == '__main__':
def add(x, y):
return x + y
def inc(x):
return x + 1
dsk = {'x': 1, 'y': (inc, 'x'),
'a': 2, 'b': (inc, 'a'),
'z': (add, 'y', 'b')}
dot_graph(dsk)
| {
"repo_name": "marianotepper/dask",
"path": "dask/dot.py",
"copies": "2",
"size": "2569",
"license": "bsd-3-clause",
"hash": -8990308331645823000,
"line_mean": 26.9239130435,
"line_max": 87,
"alpha_frac": 0.514986376,
"autogenerated": false,
"ratio": 3.4483221476510066,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4963308523651007,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import networkx as nx
from datashape import discover
from .utils import expand_tuples, cls_name
from contextlib import contextmanager
ooc_types = set() # Out-of-Core types
class NetworkDispatcher(object):
def __init__(self, name):
self.name = name
self.graph = nx.DiGraph()
def register(self, a, b, cost=1.0):
sigs = expand_tuples([a, b])
def _(func):
for a, b in sigs:
self.graph.add_edge(b, a, cost=cost, func=func)
return func
return _
def path(self, *args, **kwargs):
return path(self.graph, *args, **kwargs)
def __call__(self, *args, **kwargs):
return _transform(self.graph, *args, **kwargs)
def _transform(graph, target, source, excluded_edges=None, ooc_types=ooc_types,
**kwargs):
""" Transform source to target type using graph of transformations """
x = source
excluded_edges = excluded_edges or set()
try:
if 'dshape' not in kwargs:
kwargs['dshape'] = discover(x)
except NotImplementedError:
pass
pth = path(graph, type(source), target,
excluded_edges=excluded_edges,
ooc_types=ooc_types)
try:
for (A, B, f) in pth:
oldx = x
x = f(x, excluded_edges=excluded_edges, **kwargs)
return x
except NotImplementedError as e:
if kwargs.get('raise_on_errors'):
raise
print("Failed on %s -> %s. Working around" %
(A.__name__, B.__name__))
print("Error message:\n%s" % e)
new_exclusions = excluded_edges | set([(A, B)])
return _transform(graph, target, source, excluded_edges=new_exclusions, **kwargs)
def path(graph, source, target, excluded_edges=None, ooc_types=ooc_types):
""" Path of functions between two types """
if not isinstance(source, type):
source = type(source)
if not isinstance(target, type):
target = type(target)
if source not in graph:
for cls in valid_subclasses:
if issubclass(source, cls):
source = cls
break
# If both source and target are Out-Of-Core types then restrict ourselves
# to the graph of out-of-core types
if ooc_types:
oocs = tuple(ooc_types)
if issubclass(source, oocs) and issubclass(target, oocs):
oldgraph = graph
graph = graph.subgraph([n for n in graph.nodes() if issubclass(n, oocs)])
with without_edges(graph, excluded_edges) as g:
pth = nx.shortest_path(g, source=source, target=target, weight='cost')
result = [(source, target, graph.edge[source][target]['func'])
for source, target in zip(pth, pth[1:])]
return result
# Catch-all subclasses
from collections import Iterator
import numpy as np
valid_subclasses = [Iterator, np.ndarray]
@contextmanager
def without_edges(g, edges):
edges = edges or []
held = dict()
for a, b in edges:
held[(a, b)] = g.edge[a][b]
g.remove_edge(a, b)
try:
yield g
finally:
for (a, b), kwargs in held.items():
g.add_edge(a, b, **kwargs)
| {
"repo_name": "mrocklin/into",
"path": "into/core.py",
"copies": "1",
"size": "3263",
"license": "bsd-3-clause",
"hash": -637591899890687000,
"line_mean": 30.0761904762,
"line_max": 89,
"alpha_frac": 0.5908673,
"autogenerated": false,
"ratio": 3.7548906789413117,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4845757978941312,
"avg_score": null,
"num_lines": null
} |
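A compact sketch of how the dispatcher above is wired together; it assumes a networkx 1.x installation (the module uses graph.edge, removed in networkx 2.x) and that this file is importable as into.core. Note that register(target, source) adds an edge from the source type to the target type.

from into.core import NetworkDispatcher

convert = NetworkDispatcher('convert')

@convert.register(tuple, list, cost=1.0)
def list_to_tuple(x, **kwargs):
    return tuple(x)

@convert.register(set, tuple, cost=1.0)
def tuple_to_set(x, **kwargs):
    return set(x)

# path() walks the cheapest chain of registered conversions ...
steps = convert.path(list, set)
print([f.__name__ for _, _, f in steps])   # ['list_to_tuple', 'tuple_to_set']

# ... and __call__ applies that chain to a concrete value.
print(convert(set, [1, 2, 2, 3]))          # {1, 2, 3}
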
from __future__ import absolute_import, division, print_function
import networkx as nx
from datashape import discover
from .utils import expand_tuples, ignoring
from contextlib import contextmanager
ooc_types = set() # Out-of-Core types
class NetworkDispatcher(object):
def __init__(self, name):
self.name = name
self.graph = nx.DiGraph()
def register(self, a, b, cost=1.0):
sigs = expand_tuples([a, b])
def _(func):
for a, b in sigs:
self.graph.add_edge(b, a, cost=cost, func=func)
return func
return _
def path(self, *args, **kwargs):
return path(self.graph, *args, **kwargs)
def __call__(self, *args, **kwargs):
return _transform(self.graph, *args, **kwargs)
def _transform(graph, target, source, excluded_edges=None, ooc_types=ooc_types,
**kwargs):
""" Transform source to target type using graph of transformations """
x = source
excluded_edges = excluded_edges or set()
with ignoring(NotImplementedError):
if 'dshape' not in kwargs:
kwargs['dshape'] = discover(x)
pth = path(graph, type(source), target,
excluded_edges=excluded_edges,
ooc_types=ooc_types)
try:
for (A, B, f) in pth:
x = f(x, excluded_edges=excluded_edges, **kwargs)
return x
except NotImplementedError as e:
if kwargs.get('raise_on_errors'):
raise
print("Failed on %s -> %s. Working around" % (A.__name__, B.__name__))
print("Error message:\n%s" % e)
new_exclusions = excluded_edges | set([(A, B)])
return _transform(graph, target, source, excluded_edges=new_exclusions,
**kwargs)
def path(graph, source, target, excluded_edges=None, ooc_types=ooc_types):
""" Path of functions between two types """
if not isinstance(source, type):
source = type(source)
if not isinstance(target, type):
target = type(target)
if source not in graph:
for cls in valid_subclasses:
if issubclass(source, cls):
source = cls
break
# If both source and target are Out-Of-Core types then restrict ourselves
# to the graph of out-of-core types
if ooc_types:
oocs = tuple(ooc_types)
if issubclass(source, oocs) and issubclass(target, oocs):
graph = graph.subgraph([n for n in graph.nodes()
if issubclass(n, oocs)])
with without_edges(graph, excluded_edges) as g:
pth = nx.shortest_path(g, source=source, target=target, weight='cost')
result = [(src, tgt, graph.edge[src][tgt]['func'])
for src, tgt in zip(pth, pth[1:])]
return result
# Catch-all subclasses
from collections import Iterator
import numpy as np
valid_subclasses = [Iterator, np.ndarray]
@contextmanager
def without_edges(g, edges):
edges = edges or []
held = dict()
for a, b in edges:
held[(a, b)] = g.edge[a][b]
g.remove_edge(a, b)
try:
yield g
finally:
for (a, b), kwargs in held.items():
g.add_edge(a, b, **kwargs)
| {
"repo_name": "alexmojaki/odo",
"path": "odo/core.py",
"copies": "3",
"size": "3228",
"license": "bsd-3-clause",
"hash": 6346366850859720000,
"line_mean": 30.3398058252,
"line_max": 79,
"alpha_frac": 0.5861214374,
"autogenerated": false,
"ratio": 3.7798594847775178,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0008051903810718323,
"num_lines": 103
} |
from __future__ import absolute_import, division, print_function
import numbers
from datetime import date, datetime
import toolz
from toolz import first, concat, memoize, unique, assoc
import itertools
from collections import Iterator
from ..compatibility import basestring
from ..expr import Expr, Field, Symbol, symbol, eval_str
from ..dispatch import dispatch
__all__ = ['compute', 'compute_up']
base = (numbers.Number, basestring, date, datetime)
@dispatch(Expr, object)
def pre_compute(leaf, data, scope=None, **kwargs):
""" Transform data prior to calling ``compute`` """
return data
@dispatch(Expr, object)
def post_compute(expr, result, scope=None):
""" Effects after the computation is complete """
return result
@dispatch(Expr, object)
def optimize(expr, data):
""" Optimize expression to be computed on data """
return expr
@dispatch(object, object)
def compute_up(a, b, **kwargs):
raise NotImplementedError("Blaze does not know how to compute "
"expression of type `%s` on data of type `%s`"
% (type(a).__name__, type(b).__name__))
@dispatch(base)
def compute_up(a, **kwargs):
return a
@dispatch((list, tuple))
def compute_up(seq, scope=None, **kwargs):
return type(seq)(compute(item, scope or {}, **kwargs) for item in seq)
@dispatch(Expr, object)
def compute(expr, o, **kwargs):
""" Compute against single input
Assumes that only one Symbol exists in expression
>>> t = symbol('t', 'var * {name: string, balance: int}')
>>> deadbeats = t[t['balance'] < 0]['name']
>>> data = [['Alice', 100], ['Bob', -50], ['Charlie', -20]]
>>> # list(compute(deadbeats, {t: data}))
>>> list(compute(deadbeats, data))
['Bob', 'Charlie']
"""
ts = set([x for x in expr._subterms() if isinstance(x, Symbol)])
if len(ts) == 1:
return compute(expr, {first(ts): o}, **kwargs)
else:
raise ValueError("Give compute dictionary input, got %s" % str(o))
@dispatch(object)
def compute_down(expr, **kwargs):
""" Compute the expression on the entire inputs
inputs match up to leaves of the expression
"""
return expr
def issubtype(a, b):
""" A custom issubclass """
if issubclass(a, b):
return True
if issubclass(a, (tuple, list, set)) and issubclass(b, Iterator):
return True
if issubclass(b, (tuple, list, set)) and issubclass(a, Iterator):
return True
return False
def type_change(old, new):
""" Was there a significant type change between old and new data?
>>> type_change([1, 2], [3, 4])
False
>>> type_change([1, 2], [3, [1,2,3]])
True
Some special cases exist, like no type change from list to Iterator
>>> type_change([[1, 2]], [iter([1, 2])])
False
"""
if all(isinstance(x, base) for x in old + new):
return False
if len(old) != len(new):
return True
new_types = list(map(type, new))
old_types = list(map(type, old))
return not all(map(issubtype, new_types, old_types))
def top_then_bottom_then_top_again_etc(expr, scope, **kwargs):
""" Compute expression against scope
Does the following interpreter strategy:
1. Try compute_down on the entire expression
2. Otherwise compute_up from the leaves until we experience a type change
(e.g. data changes from dict -> pandas DataFrame)
3. Re-optimize expression and re-pre-compute data
4. Go to step 1
Examples
--------
>>> import numpy as np
>>> s = symbol('s', 'var * {name: string, amount: int}')
>>> data = np.array([('Alice', 100), ('Bob', 200), ('Charlie', 300)],
... dtype=[('name', 'S7'), ('amount', 'i4')])
>>> e = s.amount.sum() + 1
>>> top_then_bottom_then_top_again_etc(e, {s: data})
601
See Also
--------
bottom_up_until_type_break -- uses this for bottom-up traversal
top_to_bottom -- older version
bottom_up -- older version still
"""
# 0. Base case: expression is in dict, return associated data
if expr in scope:
return scope[expr]
if not hasattr(expr, '_leaves'):
return expr
leaf_exprs = list(expr._leaves())
leaf_data = [scope.get(leaf) for leaf in leaf_exprs]
# 1. See if we have a direct computation path with compute_down
try:
return compute_down(expr, *leaf_data, **kwargs)
except NotImplementedError:
pass
# 2. Compute from the bottom until there is a data type change
expr2, scope2 = bottom_up_until_type_break(expr, scope, **kwargs)
# 3. Re-optimize data and expressions
optimize_ = kwargs.get('optimize', optimize)
pre_compute_ = kwargs.get('pre_compute', pre_compute)
if pre_compute_:
scope3 = dict((e, pre_compute_(e, datum,
**assoc(kwargs, 'scope', scope2)))
for e, datum in scope2.items())
else:
scope3 = scope2
if optimize_:
try:
expr3 = optimize_(expr2, *[scope3[leaf] for leaf in expr2._leaves()])
_d = dict(zip(expr2._leaves(), expr3._leaves()))
scope4 = dict((e._subs(_d), d) for e, d in scope3.items())
except NotImplementedError:
expr3 = expr2
scope4 = scope3
else:
expr3 = expr2
scope4 = scope3
# 4. Repeat
if expr.isidentical(expr3):
raise NotImplementedError("Don't know how to compute:\n"
"expr: %s\n"
"data: %s" % (expr3, scope4))
else:
return top_then_bottom_then_top_again_etc(expr3, scope4, **kwargs)
def top_to_bottom(d, expr, **kwargs):
""" Processes an expression top-down then bottom-up """
# Base case: expression is in dict, return associated data
if expr in d:
return d[expr]
if not hasattr(expr, '_leaves'):
return expr
leaves = list(expr._leaves())
data = [d.get(leaf) for leaf in leaves]
# See if we have a direct computation path with compute_down
try:
return compute_down(expr, *data, **kwargs)
except NotImplementedError:
pass
optimize_ = kwargs.get('optimize', optimize)
pre_compute_ = kwargs.get('pre_compute', pre_compute)
# Otherwise...
# Compute children of this expression
if hasattr(expr, '_inputs'):
children = [top_to_bottom(d, child, **kwargs)
for child in expr._inputs]
else:
children = []
# Did we experience a data type change?
if type_change(data, children):
# If so call pre_compute again
if pre_compute_:
children = [pre_compute_(expr, child, **kwargs) for child in children]
# If so call optimize again
if optimize_:
try:
expr = optimize_(expr, *children)
except NotImplementedError:
pass
# Compute this expression given the children
return compute_up(expr, *children, scope=d, **kwargs)
_names = ('leaf_%d' % i for i in itertools.count(1))
_leaf_cache = dict()
_used_tokens = set()
def _reset_leaves():
_leaf_cache.clear()
_used_tokens.clear()
def makeleaf(expr):
""" Name of a new leaf replacement for this expression
>>> _reset_leaves()
>>> t = symbol('t', '{x: int, y: int, z: int}')
>>> makeleaf(t)
t
>>> makeleaf(t.x)
x
>>> makeleaf(t.x + 1)
x
>>> makeleaf(t.x + 1)
x
>>> makeleaf(t.x).isidentical(makeleaf(t.x + 1))
False
>>> from blaze import sin, cos
>>> x = symbol('x', 'real')
>>> makeleaf(cos(x)**2).isidentical(sin(x)**2)
False
>>> makeleaf(t) is t # makeleaf passes on Symbols
True
"""
name = expr._name or '_'
token = None
if expr in _leaf_cache:
return _leaf_cache[expr]
if isinstance(expr, Symbol): # Idempotent on symbols
_used_tokens.add((name, expr._token))
return expr
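# Token bookkeeping (descriptive note): ``token`` starts as None, matching
# symbols created without an explicit token.  If (name, None) is already in
# use, the loop below scans 0, 1, 2, ... for the first free (name, token)
# pair, so distinct expressions that share a ``_name`` get distinct leaves.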
if (name, token) in _used_tokens:
for token in itertools.count():
if (name, token) not in _used_tokens:
break
result = symbol(name, expr.dshape, token)
_used_tokens.add((name, token))
_leaf_cache[expr] = result
return result
def data_leaves(expr, scope):
return [scope[leaf] for leaf in expr._leaves()]
def bottom_up_until_type_break(expr, scope, **kwargs):
""" Traverse bottom up until data changes significantly
Parameters
----------
expr: Expression
Expression to compute
scope: dict
namespace matching leaves of expression to data
Returns
-------
expr: Expression
New expression with lower subtrees replaced with leaves
scope: dict
New scope with entries for those leaves
Examples
--------
>>> import numpy as np
>>> s = symbol('s', 'var * {name: string, amount: int}')
>>> data = np.array([('Alice', 100), ('Bob', 200), ('Charlie', 300)],
... dtype=[('name', 'S7'), ('amount', 'i8')])
This computation completes without changing type. We get back a leaf
symbol and a computational result
>>> e = (s.amount + 1).distinct()
>>> bottom_up_until_type_break(e, {s: data}) # doctest: +SKIP
(amount, {amount: array([101, 201, 301])})
This computation has a type change midstream (``list`` to ``int``), so we
stop and get the unfinished computation.
>>> e = s.amount.sum() + 1
>>> bottom_up_until_type_break(e, {s: data})
(amount_sum + 1, {amount_sum: 600})
"""
# 0. Base case. Return if expression is in scope
if expr in scope:
leaf = makeleaf(expr)
return leaf, {leaf: scope[expr]}
inputs = list(unique(expr._inputs))
# 1. Recurse down the tree, calling this function on children
# (this is the bottom part of bottom up)
exprs, new_scopes = zip(*[bottom_up_until_type_break(i, scope, **kwargs)
for i in inputs])
# 2. Form new (much shallower) expression and new (more computed) scope
new_scope = toolz.merge(new_scopes)
new_expr = expr._subs(dict((i, e) for i, e in zip(inputs, exprs)
if not i.isidentical(e)))
old_expr_leaves = expr._leaves()
old_data_leaves = [scope.get(leaf) for leaf in old_expr_leaves]
# 3. If the leaves have changed substantially then stop
key = lambda x: str(type(x))
if type_change(sorted(new_scope.values(), key=key),
sorted(old_data_leaves, key=key)):
return new_expr, new_scope
# 4. Otherwise try to do some actual work
try:
leaf = makeleaf(expr)
_data = [new_scope[i] for i in new_expr._inputs]
except KeyError:
return new_expr, new_scope
try:
return leaf, {leaf: compute_up(new_expr, *_data, scope=new_scope,
**kwargs)}
except NotImplementedError:
return new_expr, new_scope
def bottom_up(d, expr):
"""
Process an expression from the leaves upwards
Parameters
----------
d : dict mapping {Symbol: data}
Maps expressions to data elements, likely at the leaves of the tree
expr : Expr
Expression to compute
Helper function for ``compute``
"""
# Base case: expression is in dict, return associated data
if expr in d:
return d[expr]
# Compute children of this expression
children = ([bottom_up(d, child) for child in expr._inputs]
if hasattr(expr, '_inputs') else [])
# Compute this expression given the children
result = compute_up(expr, *children, scope=d)
return result
def swap_resources_into_scope(expr, scope):
""" Translate interactive expressions into normal abstract expressions
Interactive Blaze expressions link to data on their leaves. From the
expr/compute perspective, this is a hack. We push the resources onto the
scope and return simple unadorned expressions instead.
Examples
--------
>>> from blaze import Data
>>> t = Data([1, 2, 3], dshape='3 * int', name='t')
>>> swap_resources_into_scope(t.head(2), {})
(t.head(2), {t: [1, 2, 3]})
>>> expr, scope = _
>>> list(scope.keys())[0]._resources()
{}
"""
resources = expr._resources()
symbol_dict = dict((t, symbol(t._name, t.dshape)) for t in resources)
resources = dict((symbol_dict[k], v) for k, v in resources.items())
other_scope = dict((k, v) for k, v in scope.items()
if k not in symbol_dict)
new_scope = toolz.merge(resources, other_scope)
expr = expr._subs(symbol_dict)
return expr, new_scope
@dispatch(Expr, dict)
def compute(expr, d, **kwargs):
""" Compute expression against data sources
>>> t = symbol('t', 'var * {name: string, balance: int}')
>>> deadbeats = t[t['balance'] < 0]['name']
>>> data = [['Alice', 100], ['Bob', -50], ['Charlie', -20]]
>>> list(compute(deadbeats, {t: data}))
['Bob', 'Charlie']
"""
_reset_leaves()
optimize_ = kwargs.get('optimize', optimize)
pre_compute_ = kwargs.get('pre_compute', pre_compute)
post_compute_ = kwargs.get('post_compute', post_compute)
expr2, d2 = swap_resources_into_scope(expr, d)
if pre_compute_:
d3 = dict(
(e, pre_compute_(e, dat, **kwargs))
for e, dat in d2.items()
if e in expr2
)
else:
d3 = d2
if optimize_:
try:
expr3 = optimize_(expr2, *[v for e, v in d3.items() if e in expr2])
_d = dict(zip(expr2._leaves(), expr3._leaves()))
d4 = dict((e._subs(_d), d) for e, d in d3.items())
except NotImplementedError:
expr3 = expr2
d4 = d3
else:
expr3 = expr2
d4 = d3
result = top_then_bottom_then_top_again_etc(expr3, d4, **kwargs)
if post_compute_:
result = post_compute_(expr3, result, scope=d4)
return result
@dispatch(Field, dict)
def compute_up(expr, data, **kwargs):
return data[expr._name]
| {
"repo_name": "scls19fr/blaze",
"path": "blaze/compute/core.py",
"copies": "6",
"size": "14107",
"license": "bsd-3-clause",
"hash": 1879904644053326300,
"line_mean": 28.3284823285,
"line_max": 82,
"alpha_frac": 0.5891401432,
"autogenerated": false,
"ratio": 3.6968029350104823,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.7285943078210482,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import numbers
from datetime import date, datetime
import toolz
from toolz import first, unique, assoc
import itertools
from collections import Iterator
import pandas as pd
from odo import odo
from ..compatibility import basestring
from ..expr import Expr, Field, Symbol, symbol, Join
from ..dispatch import dispatch
__all__ = ['compute', 'compute_up']
base = numbers.Number, basestring, date, datetime
@dispatch(Expr, object)
def pre_compute(leaf, data, scope=None, **kwargs):
""" Transform data prior to calling ``compute`` """
return data
@dispatch(Expr, object)
def post_compute(expr, result, scope=None):
""" Effects after the computation is complete """
return result
@dispatch(Expr, object)
def optimize(expr, data):
""" Optimize expression to be computed on data """
return expr
@dispatch(object, object)
def compute_up(a, b, **kwargs):
raise NotImplementedError("Blaze does not know how to compute "
"expression of type `%s` on data of type `%s`"
% (type(a).__name__, type(b).__name__))
@dispatch(base)
def compute_up(a, **kwargs):
return a
@dispatch((list, tuple))
def compute_up(seq, scope=None, **kwargs):
return type(seq)(compute(item, scope or {}, **kwargs) for item in seq)
@dispatch(Expr, object)
def compute(expr, o, **kwargs):
""" Compute against single input
Assumes that only one Symbol exists in expression
>>> t = symbol('t', 'var * {name: string, balance: int}')
>>> deadbeats = t[t['balance'] < 0]['name']
>>> data = [['Alice', 100], ['Bob', -50], ['Charlie', -20]]
>>> # list(compute(deadbeats, {t: data}))
>>> list(compute(deadbeats, data))
['Bob', 'Charlie']
"""
ts = set([x for x in expr._subterms() if isinstance(x, Symbol)])
if len(ts) == 1:
return compute(expr, {first(ts): o}, **kwargs)
else:
raise ValueError("Give compute dictionary input, got %s" % str(o))
@dispatch(object)
def compute_down(expr, **kwargs):
""" Compute the expression on the entire inputs
inputs match up to leaves of the expression
"""
return expr
def issubtype(a, b):
""" A custom issubclass """
if issubclass(a, b):
return True
if issubclass(a, (tuple, list, set)) and issubclass(b, Iterator):
return True
if issubclass(b, (tuple, list, set)) and issubclass(a, Iterator):
return True
return False
def type_change(old, new):
""" Was there a significant type change between old and new data?
>>> type_change([1, 2], [3, 4])
False
>>> type_change([1, 2], [3, [1,2,3]])
True
Some special cases exist, like no type change from list to Iterator
>>> type_change([[1, 2]], [iter([1, 2])])
False
"""
if all(isinstance(x, base) for x in old + new):
return False
if len(old) != len(new):
return True
new_types = list(map(type, new))
old_types = list(map(type, old))
return not all(map(issubtype, new_types, old_types))
def top_then_bottom_then_top_again_etc(expr, scope, **kwargs):
""" Compute expression against scope
Does the following interpreter strategy:
1. Try compute_down on the entire expression
2. Otherwise compute_up from the leaves until we experience a type change
(e.g. data changes from dict -> pandas DataFrame)
3. Re-optimize expression and re-pre-compute data
4. Go to step 1
Examples
--------
>>> import numpy as np
>>> s = symbol('s', 'var * {name: string, amount: int}')
>>> data = np.array([('Alice', 100), ('Bob', 200), ('Charlie', 300)],
... dtype=[('name', 'S7'), ('amount', 'i4')])
>>> e = s.amount.sum() + 1
>>> top_then_bottom_then_top_again_etc(e, {s: data})
601
See Also
--------
bottom_up_until_type_break -- uses this for bottom-up traversal
top_to_bottom -- older version
bottom_up -- older version still
"""
# 0. Base case: expression is in dict, return associated data
if expr in scope:
return scope[expr]
if not hasattr(expr, '_leaves'):
return expr
leaf_exprs = list(expr._leaves())
leaf_data = [scope.get(leaf) for leaf in leaf_exprs]
# 1. See if we have a direct computation path with compute_down
try:
return compute_down(expr, *leaf_data, **kwargs)
except NotImplementedError:
pass
# 2. Compute from the bottom until there is a data type change
expr2, scope2 = bottom_up_until_type_break(expr, scope, **kwargs)
# 3. Re-optimize data and expressions
optimize_ = kwargs.get('optimize', optimize)
pre_compute_ = kwargs.get('pre_compute', pre_compute)
if pre_compute_:
scope3 = dict((e, pre_compute_(e, datum,
**assoc(kwargs, 'scope', scope2)))
for e, datum in scope2.items())
else:
scope3 = scope2
if optimize_:
try:
expr3 = optimize_(expr2, *[scope3[leaf]
for leaf in expr2._leaves()])
_d = dict(zip(expr2._leaves(), expr3._leaves()))
scope4 = dict((e._subs(_d), d) for e, d in scope3.items())
except NotImplementedError:
expr3 = expr2
scope4 = scope3
else:
expr3 = expr2
scope4 = scope3
# 4. Repeat
if expr.isidentical(expr3):
raise NotImplementedError("Don't know how to compute:\n"
"expr: %s\n"
"data: %s" % (expr3, scope4))
else:
return top_then_bottom_then_top_again_etc(expr3, scope4, **kwargs)
_names = ('leaf_%d' % i for i in itertools.count(1))
_leaf_cache = dict()
_used_tokens = set()
def _reset_leaves():
_leaf_cache.clear()
_used_tokens.clear()
def makeleaf(expr):
""" Name of a new leaf replacement for this expression
>>> _reset_leaves()
>>> t = symbol('t', '{x: int, y: int, z: int}')
>>> makeleaf(t)
t
>>> makeleaf(t.x)
x
>>> makeleaf(t.x + 1)
x
>>> makeleaf(t.x + 1)
x
>>> makeleaf(t.x).isidentical(makeleaf(t.x + 1))
False
>>> from blaze import sin, cos
>>> x = symbol('x', 'real')
>>> makeleaf(cos(x)**2).isidentical(sin(x)**2)
False
>>> makeleaf(t) is t # makeleaf passes on Symbols
True
"""
name = expr._name or '_'
token = None
if expr in _leaf_cache:
return _leaf_cache[expr]
if isinstance(expr, Symbol): # Idempotent on symbols
_used_tokens.add((name, expr._token))
return expr
if (name, token) in _used_tokens:
for token in itertools.count():
if (name, token) not in _used_tokens:
break
result = symbol(name, expr.dshape, token)
_used_tokens.add((name, token))
_leaf_cache[expr] = result
return result
def data_leaves(expr, scope):
return [scope[leaf] for leaf in expr._leaves()]
def bottom_up_until_type_break(expr, scope, **kwargs):
""" Traverse bottom up until data changes significantly
Parameters
----------
expr: Expression
Expression to compute
scope: dict
namespace matching leaves of expression to data
Returns
-------
expr: Expression
New expression with lower subtrees replaced with leaves
scope: dict
New scope with entries for those leaves
Examples
--------
>>> import numpy as np
>>> s = symbol('s', 'var * {name: string, amount: int}')
>>> data = np.array([('Alice', 100), ('Bob', 200), ('Charlie', 300)],
... dtype=[('name', 'S7'), ('amount', 'i8')])
This computation completes without changing type. We get back a leaf
symbol and a computational result
>>> e = (s.amount + 1).distinct()
>>> bottom_up_until_type_break(e, {s: data}) # doctest: +SKIP
(amount, {amount: array([101, 201, 301])})
This computation has a type change midstream (``list`` to ``int``), so we
stop and get the unfinished computation.
>>> e = s.amount.sum() + 1
>>> bottom_up_until_type_break(e, {s: data})
(amount_sum + 1, {amount_sum: 600})
"""
# 0. Base case. Return if expression is in scope
if expr in scope:
leaf = makeleaf(expr)
return leaf, {leaf: scope[expr]}
inputs = list(unique(expr._inputs))
# 1. Recurse down the tree, calling this function on children
# (this is the bottom part of bottom up)
exprs, new_scopes = zip(*[bottom_up_until_type_break(i, scope, **kwargs)
for i in inputs])
# 2. Form new (much shallower) expression and new (more computed) scope
new_scope = toolz.merge(new_scopes)
new_expr = expr._subs(dict((i, e) for i, e in zip(inputs, exprs)
if not i.isidentical(e)))
old_expr_leaves = expr._leaves()
old_data_leaves = [scope.get(leaf) for leaf in old_expr_leaves]
# 3. If the leaves have changed substantially then stop
key = lambda x: str(type(x))
if type_change(sorted(new_scope.values(), key=key),
sorted(old_data_leaves, key=key)):
return new_expr, new_scope
# 4. Otherwise try to do some actual work
try:
leaf = makeleaf(expr)
_data = [new_scope[i] for i in new_expr._inputs]
except KeyError:
return new_expr, new_scope
try:
return leaf, {leaf: compute_up(new_expr, *_data, scope=new_scope,
**kwargs)}
except NotImplementedError:
return new_expr, new_scope
def swap_resources_into_scope(expr, scope):
""" Translate interactive expressions into normal abstract expressions
Interactive Blaze expressions link to data on their leaves. From the
expr/compute perspective, this is a hack. We push the resources onto the
scope and return simple unadorned expressions instead.
Examples
--------
>>> from blaze import Data
>>> t = Data([1, 2, 3], dshape='3 * int', name='t')
>>> swap_resources_into_scope(t.head(2), {})
(t.head(2), {t: [1, 2, 3]})
>>> expr, scope = _
>>> list(scope.keys())[0]._resources()
{}
"""
resources = expr._resources()
symbol_dict = dict((t, symbol(t._name, t.dshape)) for t in resources)
resources = dict((symbol_dict[k], v) for k, v in resources.items())
other_scope = dict((k, v) for k, v in scope.items()
if k not in symbol_dict)
new_scope = toolz.merge(resources, other_scope)
expr = expr._subs(symbol_dict)
return expr, new_scope
@dispatch(Expr, dict)
def compute(expr, d, **kwargs):
""" Compute expression against data sources
>>> t = symbol('t', 'var * {name: string, balance: int}')
>>> deadbeats = t[t['balance'] < 0]['name']
>>> data = [['Alice', 100], ['Bob', -50], ['Charlie', -20]]
>>> list(compute(deadbeats, {t: data}))
['Bob', 'Charlie']
"""
_reset_leaves()
optimize_ = kwargs.get('optimize', optimize)
pre_compute_ = kwargs.get('pre_compute', pre_compute)
post_compute_ = kwargs.get('post_compute', post_compute)
expr2, d2 = swap_resources_into_scope(expr, d)
if pre_compute_:
d3 = dict(
(e, pre_compute_(e, dat, **kwargs))
for e, dat in d2.items()
if e in expr2
)
else:
d3 = d2
if optimize_:
try:
expr3 = optimize_(expr2, *[v for e, v in d3.items() if e in expr2])
_d = dict(zip(expr2._leaves(), expr3._leaves()))
d4 = dict((e._subs(_d), d) for e, d in d3.items())
except NotImplementedError:
expr3 = expr2
d4 = d3
else:
expr3 = expr2
d4 = d3
result = top_then_bottom_then_top_again_etc(expr3, d4, **kwargs)
if post_compute_:
result = post_compute_(expr3, result, scope=d4)
return result
@dispatch(Field, dict)
def compute_up(expr, data, **kwargs):
return data[expr._name]
@compute_up.register(Join, object, object)
def join_dataframe_to_selectable(expr, lhs, rhs, scope=None, **kwargs):
lexpr, rexpr = expr._leaves()
return compute(
expr,
{
lexpr: odo(lhs, pd.DataFrame, dshape=lexpr.dshape),
rexpr: odo(rhs, pd.DataFrame, dshape=rexpr.dshape)
},
**kwargs
)
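# Descriptive note: this generic fallback routes a Join through pandas --
# odo converts both operands to DataFrames and compute() is re-dispatched on
# the result.  Illustrative call, assuming odo can convert both inputs:
#
#     # compute(join(lhs_expr, rhs_expr), {lhs_expr: sql_table, rhs_expr: seq})
#     # -> both sides become DataFrames; the pandas join backend does the work.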
| {
"repo_name": "cpcloud/blaze",
"path": "blaze/compute/core.py",
"copies": "2",
"size": "12473",
"license": "bsd-3-clause",
"hash": 3165300232817224700,
"line_mean": 28.279342723,
"line_max": 79,
"alpha_frac": 0.583580534,
"autogenerated": false,
"ratio": 3.614314691393799,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00007113387395077537,
"num_lines": 426
} |
from __future__ import absolute_import, division, print_function
import numbers
import cPickle
import numpy as np
import theano
import theano.tensor as tt
from theano.ifelse import ifelse
from theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams
floatX = theano.config.floatX
def cast_floatX(n):
return np.asarray(n, dtype=floatX)
class Model(object):
def __init__(self, config):
self._params = [] # shared variables for learned parameters
self._sticky_hidden_states = [] # shared variables which are reset before each epoch
self._np_rng = np.random.RandomState(config.seed // 2 + 123)
self._theano_rng = RandomStreams(config.seed // 2 + 321) # generates random numbers directly on GPU
self._init_scale = config.init_scale
self._is_training = tt.iscalar('is_training')
self._lr = theano.shared(cast_floatX(config.learning_rate), 'lr')
input_data = tt.imatrix('input_data') # (batch_size, num_steps)
targets = tt.imatrix('targets') # (batch_size, num_steps)
noise_x = tt.matrix('noise_x') # (batch_size, num_steps)
# Embed input words and apply variational dropout (for each sample, the embedding of
# a dropped word-type consists of all zeros at all occurrences of word-type in sample).
embedding = self.make_param((config.vocab_size, config.hidden_size), 'uniform')
inputs = embedding[input_data.T] # (num_steps, batch_size, hidden_size)
inputs = self.apply_dropout(inputs, tt.shape_padright(noise_x.T))
rhn_updates = []
for _ in range(config.num_layers):
# y shape: (num_steps, batch_size, hidden_size)
y, sticky_state_updates = self.RHNLayer(
inputs,
config.depth, config.batch_size, config.hidden_size,
config.drop_i, config.drop_s,
config.init_T_bias, config.init_other_bias,
config.tied_noise)
rhn_updates += sticky_state_updates
inputs = y
noise_o = self.get_dropout_noise((config.batch_size, config.hidden_size), config.drop_o)
outputs = self.apply_dropout(y, tt.shape_padleft(noise_o)) # (num_steps, batch_size, hidden_size)
# logits
softmax_w = embedding.T if config.tied_embeddings else self.make_param((config.hidden_size, config.vocab_size), 'uniform')
softmax_b = self.make_param((config.vocab_size,), config.init_other_bias)
logits = tt.dot(outputs, softmax_w) + softmax_b # (num_steps, batch_size, vocab_size)
# probabilities and prediction loss
flat_logits = logits.reshape((config.batch_size * config.num_steps, config.vocab_size))
flat_probs = tt.nnet.softmax(flat_logits)
flat_targets = targets.T.flatten() # (batch_size * num_steps,)
xentropies = tt.nnet.categorical_crossentropy(flat_probs, flat_targets) # (batch_size * num_steps,)
pred_loss = xentropies.sum() / config.batch_size
# weight decay
l2_loss = 0.5 * tt.sum(tt.stack([tt.sum(p**2) for p in self._params]))
loss = pred_loss + config.weight_decay * l2_loss
grads = theano.grad(loss, self._params)
# gradient clipping
global_grad_norm = tt.sqrt(tt.sum(tt.stack([tt.sum(g**2) for g in grads])))
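# Descriptive note: the factor below equals min(1, max_grad_norm / ||g||), so
# after scaling the global gradient norm never exceeds config.max_grad_norm
# (e.g. ||g|| = 20 with max_grad_norm = 5 gives clip_factor = 0.25).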
clip_factor = ifelse(global_grad_norm < config.max_grad_norm,
cast_floatX(1),
tt.cast(config.max_grad_norm / global_grad_norm, floatX))
param_updates = [(p, p - self._lr * clip_factor * g) for p, g in zip(self._params, grads)]
self.train = theano.function(
[input_data, targets, noise_x],
loss,
givens = {self._is_training: np.int32(1)},
updates = rhn_updates + param_updates)
self.evaluate = theano.function(
[input_data, targets],
loss,
# Note that noise_x is unused in the computation graph of this function since _is_training is false.
givens = {self._is_training: np.int32(0), noise_x: tt.zeros((config.batch_size, config.num_steps))},
updates = rhn_updates)
self._num_params = np.sum([param.get_value().size for param in self._params])
if config.load_model:
self.load(config.load_model)
@property
def lr(self):
return self._lr.get_value()
@property
def num_params(self):
return self._num_params
def make_param(self, shape, init_scheme):
"""Create Theano shared variables, which are used as trainable model parameters."""
if isinstance(init_scheme, numbers.Number):
init_value = np.full(shape, init_scheme, floatX)
elif init_scheme == 'uniform':
init_value = self._np_rng.uniform(low=-self._init_scale, high=self._init_scale, size=shape).astype(floatX)
else:
raise AssertionError('unsupported init_scheme')
p = theano.shared(init_value)
self._params.append(p)
return p
def apply_dropout(self, x, noise):
return ifelse(self._is_training, noise * x, x)
def get_dropout_noise(self, shape, dropout_p):
keep_p = 1 - dropout_p
noise = cast_floatX(1. / keep_p) * self._theano_rng.binomial(size=shape, p=keep_p, n=1, dtype=floatX)
return noise
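# Inverted-dropout scaling (descriptive note): the binomial mask is scaled by
# 1/keep_p so that E[noise * x] == x.  For example dropout_p = 0.25 gives
# keep_p = 0.75, and surviving units are multiplied by 1/0.75 ~ 1.33, keeping
# the expected activation magnitude the same with and without dropout.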
def assign_lr(self, lr):
self._lr.set_value(cast_floatX(lr))
def reset_hidden_state(self):
for sticky_hidden_state in self._sticky_hidden_states:
sticky_hidden_state.set_value(np.zeros_like(sticky_hidden_state.get_value()))
def save(self, save_path):
with open(save_path, 'wb') as f:
for p in self._params:
cPickle.dump(p.get_value(), f, protocol=cPickle.HIGHEST_PROTOCOL)
def load(self, load_path):
with open(load_path, 'rb') as f:
for p in self._params:
p.set_value(cPickle.load(f))
def linear(self, x, in_size, out_size, bias, bias_init=None):
assert bias == (bias_init is not None)
w = self.make_param((in_size, out_size), 'uniform')
y = tt.dot(x, w)
if bias:
b = self.make_param((out_size,), bias_init)
y += b
return y
def RHNLayer(self, inputs, depth, batch_size, hidden_size, drop_i, drop_s, init_T_bias, init_H_bias, tied_noise):
"""Variational Recurrent Highway Layer (Theano implementation).
References:
Zilly, J, Srivastava, R, Koutnik, J, Schmidhuber, J., "Recurrent Highway Networks", 2016
Args:
inputs: Theano variable, shape (num_steps, batch_size, hidden_size).
depth: int, the number of RHN inner layers i.e. the number of micro-timesteps per timestep.
drop_i: float, probability of dropout over inputs.
drop_s: float, probability of dropout over recurrent hidden state.
init_T_bias: a valid bias_init argument for linear(), initialization of bias of transform gate T.
init_H_bias: a valid bias_init argument for linear(), initialization of bias of non-linearity H.
tied_noise: boolean, whether to use the same dropout masks when calculating H and when calculating T.
Returns:
y: Theano variable, recurrent hidden states at each timestep. Shape (num_steps, batch_size, hidden_size).
sticky_state_updates: a list of (shared variable, new shared variable value).
"""
# We first compute the linear transformation of the inputs over all timesteps.
# This is done outside of scan() in order to speed up computation.
# The result is then fed into scan()'s step function, one timestep at a time.
noise_i_for_H = self.get_dropout_noise((batch_size, hidden_size), drop_i)
noise_i_for_T = self.get_dropout_noise((batch_size, hidden_size), drop_i) if not tied_noise else noise_i_for_H
i_for_H = self.apply_dropout(inputs, noise_i_for_H)
i_for_T = self.apply_dropout(inputs, noise_i_for_T)
i_for_H = self.linear(i_for_H, in_size=hidden_size, out_size=hidden_size, bias=True, bias_init=init_H_bias)
i_for_T = self.linear(i_for_T, in_size=hidden_size, out_size=hidden_size, bias=True, bias_init=init_T_bias)
# Dropout noise for recurrent hidden state.
noise_s = self.get_dropout_noise((batch_size, hidden_size), drop_s)
if not tied_noise:
noise_s = tt.stack(noise_s, self.get_dropout_noise((batch_size, hidden_size), drop_s))
def step_fn(i_for_H_t, i_for_T_t, y_tm1, noise_s):
"""
Args:
Elements of sequences given to scan():
i_for_H_t: linear trans. of inputs for calculating non-linearity H at timestep t. Shape (batch_size, hidden_size).
i_for_T_t: linear trans. of inputs for calculating transform gate T at timestep t. Shape (batch_size, hidden_size).
Result of previous step function invocation (equals the outputs_info given to scan() on first timestep):
y_tm1: Shape (batch_size, hidden_size).
Non-sequences given to scan() (these are the same at all timesteps):
noise_s: (batch_size, hidden_size) or (2, batch_size, hidden_size), depending on value of tied_noise.
"""
tanh, sigm = tt.tanh, tt.nnet.sigmoid
noise_s_for_H = noise_s if tied_noise else noise_s[0]
noise_s_for_T = noise_s if tied_noise else noise_s[1]
s_lm1 = y_tm1
for l in range(depth):
s_lm1_for_H = self.apply_dropout(s_lm1, noise_s_for_H)
s_lm1_for_T = self.apply_dropout(s_lm1, noise_s_for_T)
if l == 0:
# On the first micro-timestep of each timestep we already have bias
# terms summed into i_for_H_t and into i_for_T_t.
H = tanh(i_for_H_t + self.linear(s_lm1_for_H, in_size=hidden_size, out_size=hidden_size, bias=False))
T = sigm(i_for_T_t + self.linear(s_lm1_for_T, in_size=hidden_size, out_size=hidden_size, bias=False))
else:
H = tanh(self.linear(s_lm1_for_H, in_size=hidden_size, out_size=hidden_size, bias=True, bias_init=init_H_bias))
T = sigm(self.linear(s_lm1_for_T, in_size=hidden_size, out_size=hidden_size, bias=True, bias_init=init_T_bias))
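# Highway update (descriptive note): the next line is algebraically
#   s_l = T * H + (1 - T) * s_{l-1}
# i.e. a per-unit convex combination of the candidate H and the carried
# state, gated by T; it is written as (H - s_{l-1}) * T + s_{l-1} below.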
s_l = (H - s_lm1) * T + s_lm1
s_lm1 = s_l
y_t = s_l
return y_t
# The recurrent hidden state of the RHN is sticky (the last hidden state of one batch is carried over to the next batch,
# to be used as an initial hidden state). These states are kept in shared variables and are reset before every epoch.
y_0 = theano.shared(np.zeros((batch_size, hidden_size), floatX))
self._sticky_hidden_states.append(y_0)
y, _ = theano.scan(step_fn,
sequences = [i_for_H, i_for_T],
outputs_info = [y_0],
non_sequences = [noise_s])
y_last = y[-1]
sticky_state_updates = [(y_0, y_last)]
return y, sticky_state_updates
| {
"repo_name": "julian121266/RecurrentHighwayNetworks",
"path": "theano_rhn.py",
"copies": "2",
"size": "10522",
"license": "mit",
"hash": 6387570429273125000,
"line_mean": 42.6597510373,
"line_max": 126,
"alpha_frac": 0.6510169169,
"autogenerated": false,
"ratio": 3.271766169154229,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9831270349805374,
"avg_score": 0.01830254724977094,
"num_lines": 241
} |
from __future__ import absolute_import, division, print_function
import numbers
import inspect
from pprint import pformat
from functools import reduce, partial
import numpy as np
import toolz
from toolz import unique, concat, first
import pandas as pd
from ..compatibility import _strtypes
from ..dispatch import dispatch
from ..utils import ordered_intersect
__all__ = ['Node', 'path', 'common_subexpression', 'eval_str']
base = (numbers.Number,) + _strtypes
arrtypes = np.ndarray, pd.core.generic.NDFrame
def isidentical(a, b):
""" Strict equality testing
Different from x == y -> Eq(x, y)
>>> isidentical(1, 1)
True
>>> from blaze.expr import symbol
>>> x = symbol('x', 'int')
>>> isidentical(x, 1)
False
>>> isidentical(x + 1, x + 1)
True
>>> isidentical(x + 1, x + 2)
False
>>> isidentical((x, x + 1), (x, x + 1))
True
>>> isidentical((x, x + 1), (x, x + 2))
False
"""
if isinstance(a, base) and isinstance(b, base):
return a == b
if isinstance(a, arrtypes) and isinstance(b, arrtypes):
return np.array_equal(a, b)
if type(a) != type(b):
return False
if isinstance(a, Node):
return all(map(isidentical, a._args, b._args))
if isinstance(a, (list, tuple)) and isinstance(b, (list, tuple)):
return len(a) == len(b) and all(map(isidentical, a, b))
return a == b
class Node(object):
""" Node in a tree
This serves as the base class for ``Expr``. This class holds all of the
tree traversal functions that are independent of tabular or array
computation. This is everything that we can do independent of the problem
domain. Note that datashape is not imported.
See Also
--------
blaze.expr.expressions.Expr
"""
__slots__ = ()
__inputs__ = '_child',
def __init__(self, *args, **kwargs):
slots = set(self.__slots__)
if not frozenset(kwargs) <= slots:
raise TypeError('Unknown keywords: %s' % (set(kwargs) - slots))
assigned = set()
for slot, arg in zip(self.__slots__[1:], args):
assigned.add(slot)
setattr(self, slot, arg)
for key, value in kwargs.items():
if key in assigned:
raise TypeError(
'%s got multiple values for argument %r' % (
type(self).__name__,
key,
),
)
assigned.add(key)
setattr(self, key, value)
for slot in slots - assigned:
setattr(self, slot, None)
@property
def _args(self):
return tuple(getattr(self, slot) for slot in self.__slots__[1:])
_hashargs = _args
@property
def _inputs(self):
return tuple(getattr(self, i) for i in self.__inputs__)
def _leaves(self):
""" Leaves of an expression tree
All nodes without inputs. Leaves are returned in order, left to right.
>>> from blaze.expr import symbol, join, by
>>> t = symbol('t', 'var * {id: int32, name: string}')
>>> t._leaves()
[t]
>>> by(t.name, count=t.id.nunique())._leaves()
[t]
>>> v = symbol('v', 'var * {id: int32, city: string}')
>>> join(t, v)._leaves()
[t, v]
"""
if not self._inputs:
return [self]
else:
return list(unique(concat(i._leaves() for i in self._inputs if
isinstance(i, Node))))
isidentical = isidentical
def __hash__(self):
hash_ = self._hash
if hash_ is None:
hash_ = self._hash = hash((type(self), self._hashargs))
return hash_
def __str__(self):
rep = [
'%s=%s' % (slot, _str(arg))
for slot, arg in zip(self.__slots__[1:], self._args)
]
return '%s(%s)' % (type(self).__name__, ', '.join(rep))
def __repr__(self):
return str(self)
def _traverse(self):
""" Traverse over tree, yielding all subtrees and leaves """
yield self
traversals = (arg._traverse() if isinstance(arg, Node) else [arg]
for arg in self._args)
for trav in traversals:
for item in trav:
yield item
def _subs(self, d):
""" Substitute terms in the tree
>>> from blaze.expr import symbol
>>> t = symbol('t', 'var * {name: string, amount: int, id: int}')
>>> expr = t.amount + 3
>>> expr._subs({3: 4, 'amount': 'id'}).isidentical(t.id + 4)
True
"""
return subs(self, d)
def _resources(self):
return toolz.merge([arg._resources() for arg in self._args
if isinstance(arg, Node)])
def _subterms(self):
return subterms(self)
def __contains__(self, other):
return other in set(self._subterms())
def __getstate__(self):
return tuple(self._args)
def __setstate__(self, state):
self.__init__(*state)
def __eq__(self, other):
ident = self.isidentical(other)
if ident is True:
return ident
try:
return self._eq(other)
except AttributeError:
# e.g., we can't compare whole tables to other things (yet?)
pass
return False
def __ne__(self, other):
return self._ne(other)
def __lt__(self, other):
return self._lt(other)
def __le__(self, other):
return self._le(other)
def __gt__(self, other):
return self._gt(other)
def __ge__(self, other):
return self._ge(other)
def __add__(self, other):
return self._add(other)
def __radd__(self, other):
return self._radd(other)
def __mul__(self, other):
return self._mul(other)
def __rmul__(self, other):
return self._rmul(other)
def __div__(self, other):
return self._div(other)
def __rdiv__(self, other):
return self._rdiv(other)
__truediv__ = __div__
__rtruediv__ = __rdiv__
def __floordiv__(self, other):
return self._floordiv(other)
def __rfloordiv__(self, other):
return self._rfloordiv(other)
def __sub__(self, other):
return self._sub(other)
def __rsub__(self, other):
return self._rsub(other)
def __pow__(self, other):
return self._pow(other)
def __rpow__(self, other):
return self._rpow(other)
def __mod__(self, other):
return self._mod(other)
def __rmod__(self, other):
return self._rmod(other)
def __or__(self, other):
return self._or(other)
def __ror__(self, other):
return self._ror(other)
def __and__(self, other):
return self._and(other)
def __rand__(self, other):
return self._rand(other)
def __neg__(self):
return self._neg()
def __invert__(self):
return self._invert()
def __abs__(self):
from .math import abs
return abs(self)
def get_callable_name(o):
"""Welcome to str inception. Leave your kittens at home.
"""
# special case partial objects
if isinstance(o, partial):
keywords = o.keywords
kwds = (
', '.join('%s=%r' % item for item in keywords.items())
if keywords else
''
)
args = ', '.join(map(repr, o.args))
arguments = []
if args:
arguments.append(args)
if kwds:
arguments.append(kwds)
return 'partial(%s, %s)' % (
get_callable_name(o.func),
', '.join(arguments),
)
try:
# python 3 makes builtins look nice
return o.__qualname__
except AttributeError:
try:
# show the module of the object, if we can
return '%s.%s' % (inspect.getmodule(o).__name__, o.__name__)
except AttributeError:
try:
# __self__ tells us the class the method is bound to
return '%s.%s' % (o.__self__.__name__, o.__name__)
except AttributeError:
# exhausted all avenues of printing callables so just print the
# name of the object
return o.__name__
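# Illustrative behaviour under Python 3 (hedged, not a doctest -- the exact
# output depends on the interpreter and how the callable was defined):
#   get_callable_name(len)                    -> 'len'
#   get_callable_name(partial(max, key=abs))  -> 'partial(max, key=<built-in function abs>)'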
def _str(s):
""" Wrap single quotes around strings """
if isinstance(s, str):
return repr(s)
elif callable(s):
return get_callable_name(s)
elif isinstance(s, Node):
return str(s)
elif isinstance(s, (list, tuple)):
body = ", ".join(_str(x) for x in s)
return "({0})".format(body if len(s) > 1 else (body + ","))
else:
return pformat(s).rstrip()
@dispatch(Node)
def subterms(expr):
return concat([[expr], concat(map(subterms, expr._inputs))])
@dispatch(object)
def subterms(x):
yield x
def subs(o, d):
""" Substitute values within data structure
>>> subs(1, {1: 2})
2
>>> subs([1, 2, 3], {2: 'Hello'})
[1, 'Hello', 3]
"""
d = dict((k, v) for k, v in d.items() if k is not v)
if not d:
return o
try:
if o in d:
d = d.copy()
o = d.pop(o)
except TypeError:
pass
return _subs(o, d)
@dispatch((tuple, list), dict)
def _subs(o, d):
return type(o)([subs(arg, d) for arg in o])
@dispatch(Node, dict)
def _subs(o, d):
"""
>>> from blaze.expr import symbol
>>> t = symbol('t', 'var * {name: string, balance: int}')
>>> subs(t, {'balance': 'amount'}).fields
['name', 'amount']
"""
newargs = [subs(arg, d) for arg in o._args]
return type(o)(*newargs)
@dispatch(object, dict)
def _subs(o, d):
""" Private dispatched version of ``subs``
>>> subs('Hello', {})
'Hello'
"""
return o
def path(a, b):
""" A path of nodes from a to b
>>> from blaze.expr import symbol
>>> t = symbol('t', 'var * {name: string, amount: int, id: int}')
>>> expr = t.amount.sum()
>>> list(path(expr, t))
[sum(t.amount), t.amount, t]
"""
while not a.isidentical(b):
yield a
if not a._inputs:
break
for child in a._inputs:
if any(b.isidentical(node) for node in child._traverse()):
a = child
break
yield a
def common_subexpression(expr, *exprs):
""" Common sub expression between subexpressions
Examples
--------
>>> from blaze.expr import symbol
>>> t = symbol('t', 'var * {x: int, y: int}')
>>> common_subexpression(t.x, t.y)
t
"""
# only one expression has itself as a common subexpression
if not exprs:
return expr
exprs = (expr,) + exprs
# get leaves for every expression
all_leaves = [expr._leaves() for expr in exprs]
# leaves common to all expressions
leaves = set.intersection(*map(set, all_leaves))
# no common leaves therefore no common subexpression
if not leaves:
raise ValueError(
'No common leaves found in expressions %s' % list(exprs)
)
# list of paths from each expr to each leaf
pathlist = [list(path(expr, leaf)) for expr in exprs for leaf in leaves]
# ordered intersection of paths
common = reduce(ordered_intersect, pathlist)
if not common:
raise ValueError(
'No common subexpression found in paths to leaf: %s' % list(
map(set, pathlist)
)
)
# the first expression is the deepest node in the tree that is an ancestor
# of every expression in `exprs`
return first(common)
def eval_str(expr):
""" String suitable for evaluation
>>> from blaze.expr import symbol, eval_str
>>> x = symbol('x', 'real')
>>> eval_str(2*x + 1)
'(2 * x) + 1'
>>> from datetime import date
>>> eval_str(date(2000, 1, 20))
'datetime.date(2000, 1, 20)'
"""
from datetime import date, datetime
if isinstance(expr, (date, datetime)):
return repr(expr)
return repr(expr) if isinstance(expr, _strtypes) else str(expr)
def parenthesize(s):
"""
>>> parenthesize('1')
'1'
>>> parenthesize('1 + 2')
'(1 + 2)'
"""
if ' ' in s:
return '(%s)' % s
else:
return s
| {
"repo_name": "cpcloud/blaze",
"path": "blaze/expr/core.py",
"copies": "2",
"size": "12350",
"license": "bsd-3-clause",
"hash": -7920633615569263000,
"line_mean": 24,
"line_max": 79,
"alpha_frac": 0.5337651822,
"autogenerated": false,
"ratio": 3.7572254335260116,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 494
} |
from __future__ import absolute_import, division, print_function
import numbers
import logging
import operator
import numpy as np
from glue.external.six import add_metaclass
from glue.core.contracts import contract, ContractsMeta
from glue.core.subset import InequalitySubsetState
from glue.core.util import join_component_view
__all__ = ['ComponentLink', 'BinaryComponentLink', 'CoordinateComponentLink']
def identity(x):
return x
OPSYM = {operator.add: '+', operator.sub: '-',
operator.truediv: '/', operator.mul: '*',
operator.pow: '**'}
@add_metaclass(ContractsMeta)
class ComponentLink(object):
""" ComponentLinks represent transformation logic between ComponentIDs
ComponentLinks are used to derive one
:class:`~glue.core.component_id.ComponentID` from another:
Example::
def hours_to_minutes(hours):
return hours * 60
d = Data(hour=[1, 2, 3])
hour = d.id['hour']
minute = ComponentID('minute')
link = ComponentLink( [hour], minute, using=hours_to_minutes)
link.compute(d) # array([ 60, 120, 180])
d.add_component_link(link)
d['minute'] # array([ 60, 120, 180])
"""
@contract(using='callable|None',
inverse='callable|None')
def __init__(self, comp_from, comp_to, using=None, inverse=None):
"""
:param comp_from: The input ComponentIDs
:type comp_from: list of :class:`~glue.core.component_id.ComponentID`
:param comp_to: The target component ID
:type comp_from: :class:`~glue.core.component_id.ComponentID`
:param using: The translation function which maps data from
comp_from to comp_to (optional)
The using function should satisfy::
using(data[comp_from[0]],...,data[comp_from[-1]]) = desired data
:param inverse:
The inverse translation function, if it exists (optional)
:raises:
TypeError if input is invalid
.. note ::
Both ``inverse`` and ``using`` should accept and return
numpy arrays
"""
from glue.core.data import ComponentID
self._from = comp_from
self._to = comp_to
if using is None:
using = identity
self._using = using
self._inverse = inverse
self.hidden = False # show in widgets?
self.identity = self._using is identity
if type(comp_from) is not list:
raise TypeError("comp_from must be a list: %s" % type(comp_from))
if not all(isinstance(f, ComponentID) for f in self._from):
raise TypeError("from argument is not a list of ComponentIDs: %s" %
self._from)
if not isinstance(self._to, ComponentID):
raise TypeError("to argument is not a ComponentID: %s" %
type(self._to))
if using is identity:
if len(comp_from) != 1:
raise TypeError("comp_from must have only 1 element, "
"or a 'using' function must be provided")
@contract(data='isinstance(Data)', view='array_view')
def compute(self, data, view=None):
"""For a given data set, compute the component comp_to given
the data associated with each comp_from and the ``using``
function
:param data: The data set to use
:param view: Optional view (e.g. slice) through the data to use
*Returns*:
The data associated with comp_to component
*Raises*:
InvalidAttribute, if the data set doesn't have all the
ComponentIDs needed for the transformation
"""
logger = logging.getLogger(__name__)
args = [data[join_component_view(f, view)] for f in self._from]
logger.debug("shape of first argument: %s", args[0].shape)
result = self._using(*args)
logger.debug("shape of result: %s", result.shape)
if result.shape != args[0].shape:
logger.warn("ComponentLink function %s changed shape. Fixing",
self._using.__name__)
result.shape = args[0].shape
return result
def get_from_ids(self):
""" The list of input ComponentIDs """
return self._from
@contract(old='isinstance(ComponentID)', new='isinstance(ComponentID)')
def replace_ids(self, old, new):
"""Replace all references to an old ComponentID with references
to new
:param old: ComponentID to replace
:param new: ComponentID to replace with
"""
for i, f in enumerate(self._from):
if f is old:
self._from[i] = new
if self._to is old:
self._to = new
@contract(_from='list(isinstance(ComponentID))')
def set_from_ids(self, _from):
if len(_from) != len(self._from):
raise ValueError("New ID list has the wrong length.")
self._from = _from
def get_to_id(self):
""" The target ComponentID """
return self._to
def set_to_id(self, to):
self._to = to
def get_using(self):
""" The transformation function """
return self._using
def get_inverse(self):
""" The inverse transformation, or None """
return self._inverse
def __str__(self):
args = ", ".join([t.label for t in self._from])
if self._using is not identity:
result = "%s <- %s(%s)" % (self._to, self._using.__name__, args)
else:
result = "%s <-> %s" % (self._to, self._from)
return result
def __repr__(self):
return str(self)
@contract(other='isinstance(ComponentID)|component_like|float|int')
def __add__(self, other):
return BinaryComponentLink(self, other, operator.add)
@contract(other='isinstance(ComponentID)|component_like|float|int')
def __radd__(self, other):
return BinaryComponentLink(other, self, operator.add)
@contract(other='isinstance(ComponentID)|component_like|float|int')
def __sub__(self, other):
return BinaryComponentLink(self, other, operator.sub)
@contract(other='isinstance(ComponentID)|component_like|float|int')
def __rsub__(self, other):
return BinaryComponentLink(other, self, operator.sub)
@contract(other='isinstance(ComponentID)|component_like|float|int')
def __mul__(self, other):
return BinaryComponentLink(self, other, operator.mul)
@contract(other='isinstance(ComponentID)|component_like|float|int')
def __rmul__(self, other):
return BinaryComponentLink(other, self, operator.mul)
@contract(other='isinstance(ComponentID)|component_like|float|int')
def __div__(self, other):
return BinaryComponentLink(self, other, operator.div)
@contract(other='isinstance(ComponentID)|component_like|float|int')
def __rdiv__(self, other):
return BinaryComponentLink(other, self, operator.div)
@contract(other='isinstance(ComponentID)|component_like|float|int')
def __truediv__(self, other):
return BinaryComponentLink(self, other, operator.truediv)
@contract(other='isinstance(ComponentID)|component_like|float|int')
def __rtruediv__(self, other):
return BinaryComponentLink(other, self, operator.truediv)
@contract(other='isinstance(ComponentID)|component_like|float|int')
def __pow__(self, other):
return BinaryComponentLink(self, other, operator.pow)
@contract(other='isinstance(ComponentID)|component_like|float|int')
def __rpow__(self, other):
return BinaryComponentLink(other, self, operator.pow)
@contract(other='isinstance(ComponentID)|component_like|float|int')
def __lt__(self, other):
return InequalitySubsetState(self, other, operator.lt)
@contract(other='isinstance(ComponentID)|component_like|float|int')
def __le__(self, other):
return InequalitySubsetState(self, other, operator.le)
@contract(other='isinstance(ComponentID)|component_like|float|int')
def __gt__(self, other):
return InequalitySubsetState(self, other, operator.gt)
@contract(other='isinstance(ComponentID)|component_like|float|int')
def __ge__(self, other):
return InequalitySubsetState(self, other, operator.ge)
class CoordinateComponentLink(ComponentLink):
@contract(comp_from='list(isinstance(ComponentID))',
comp_to='isinstance(ComponentID)',
coords='isinstance(Coordinates)',
index=int,
pixel2world=bool)
def __init__(self, comp_from, comp_to, coords, index, pixel2world=True):
self.coords = coords
self.index = index
self.pixel2world = pixel2world
# Some coords don't need all pixel coords
# to compute a given world coord, and vice versa
# (e.g., spectral data cubes)
self.ndim = len(comp_from)
self.from_needed = coords.dependent_axes(index)
self._from_all = comp_from
comp_from = [comp_from[i] for i in self.from_needed]
super(CoordinateComponentLink, self).__init__(
comp_from, comp_to, self.using)
self.hidden = True
def using(self, *args):
attr = 'pixel2world' if self.pixel2world else 'world2pixel'
func = getattr(self.coords, attr)
args2 = [None] * self.ndim
for f, a in zip(self.from_needed, args):
args2[f] = a
for i in range(self.ndim):
if args2[i] is None:
args2[i] = np.zeros_like(args[0])
args2 = tuple(args2)
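# Descriptive note: the argument list is reversed before the call and the
# result reversed again afterwards, which appears to translate between the
# link's own axis ordering and the axis ordering the Coordinates methods
# expect; only the single requested axis (self.index) is then returned.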
return func(*args2[::-1])[::-1][self.index]
def __str__(self):
rep = 'pix2world' if self.pixel2world else 'world2pix'
sup = super(CoordinateComponentLink, self).__str__()
return sup.replace('using', rep)
class BinaryComponentLink(ComponentLink):
"""
A ComponentLink that combines two inputs with a binary function
:param left: The first input argument.
ComponentID, ComponentLink, or number
:param right: The second input argument.
ComponentID, ComponentLink, or number
:param op: A function with two inputs that works on numpy arrays
The ComponentLink represents the logic of applying `op` to the
data associated with the inputs `left` and `right`.
"""
def __init__(self, left, right, op):
from glue.core.data import ComponentID
self._left = left
self._right = right
self._op = op
from_ = []
if isinstance(left, ComponentID):
from_.append(left)
elif isinstance(left, ComponentLink):
from_.extend(left.get_from_ids())
elif not isinstance(left, numbers.Number):
raise TypeError("Cannot create BinaryComponentLink using %s" %
left)
if isinstance(right, ComponentID):
from_.append(right)
elif isinstance(right, ComponentLink):
from_.extend(right.get_from_ids())
elif not isinstance(right, numbers.Number):
raise TypeError("Cannot create BinaryComponentLink using %s" %
right)
to = ComponentID("")
null = lambda *args: None
super(BinaryComponentLink, self).__init__(from_, to, null)
def replace_ids(self, old, new):
super(BinaryComponentLink, self).replace_ids(old, new)
if self._left is old:
self._left = new
elif isinstance(self._left, ComponentLink):
self._left.replace_ids(old, new)
if self._right is old:
self._right = new
elif isinstance(self._right, ComponentLink):
self._right.replace_ids(old, new)
def compute(self, data, view=None):
l = self._left
r = self._right
if not isinstance(self._left, numbers.Number):
l = data[self._left, view]
if not isinstance(self._right, numbers.Number):
r = data[self._right, view]
return self._op(l, r)
def __str__(self):
sym = OPSYM.get(self._op, self._op.__name__)
return '(%s %s %s)' % (self._left, sym, self._right)
def __repr__(self):
return "<BinaryComponentLink: %s>" % self
| {
"repo_name": "saimn/glue",
"path": "glue/core/component_link.py",
"copies": "1",
"size": "12336",
"license": "bsd-3-clause",
"hash": 918570524636241200,
"line_mean": 32.9834710744,
"line_max": 79,
"alpha_frac": 0.6074902724,
"autogenerated": false,
"ratio": 4.052562417871222,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5160052690271222,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import numbers
import numpy as np
from functools import partial
from itertools import chain
import datashape
from datashape import (
DataShape,
Fixed,
Option,
Record,
Unit,
Var,
dshape,
object_,
promote,
var,
)
from datashape.predicates import isscalar, iscollection, isrecord
from toolz import (
compose,
concat as tconcat,
concatv,
first,
frequencies,
get,
isdistinct,
keymap,
)
import toolz.curried.operator as op
from odo.utils import copydoc
from .core import common_subexpression
from .expressions import (
attribute,
_setattr,
ElemWise,
Expr,
Field,
Selection,
dshape_method_list,
label,
ndim,
shape,
varargsexpr,
)
from .utils import maxshape
from .literal import data, literal
from ..compatibility import zip_longest, _strtypes
from ..utils import listpack
__all__ = [
'Concat',
'concat',
'Distinct',
'distinct',
'Head',
'head',
'IsIn',
'isin',
'Join',
'join',
'Merge',
'merge',
'Sample',
'sample',
'Shift',
'shift',
'Sort',
'sort',
'Tail',
'tail',
'transform',
]
class Sort(Expr):
""" Table in sorted order
Examples
--------
>>> from blaze import symbol
>>> accounts = symbol('accounts', 'var * {name: string, amount: int}')
>>> accounts.sort('amount', ascending=False).schema
dshape("{name: string, amount: int32}")
Some backends support sorting by arbitrary rowwise tables, e.g.
>>> accounts.sort(-accounts.amount) # doctest: +SKIP
"""
_arguments = '_child', '_key', 'ascending'
def _dshape(self):
return self._child.dshape
@property
def key(self):
if self._key is None or (isinstance(self._key, tuple) and not self._key):
return self._child.fields[0]
if isinstance(self._key, tuple):
return list(self._key)
else:
return self._key
def _len(self):
return self._child._len()
@property
def _name(self):
return self._child._name
def __str__(self):
return "%s.sort(%s, ascending=%s)" % (self._child, repr(self._key),
self.ascending)
def sort(child, key=None, ascending=True):
""" Sort a collection
Parameters
----------
key : str, list of str, or Expr
Defines by what you want to sort.
* A single column string: ``t.sort('amount')``
* A list of column strings: ``t.sort(['name', 'amount'])``
* An expression: ``t.sort(-t.amount)``
If sorting a columnar dataset, the ``key`` is ignored, as it is not
necessary:
* ``t.amount.sort()``
* ``t.amount.sort('amount')``
* ``t.amount.sort('foobar')``
are all equivalent.
ascending : bool, optional
Determines order of the sort
"""
if ascending not in (True, False):
# NOTE: this test is to guard against users saying `x.sort('a', 'b')`
# when they should have said `x.sort(['a', 'b'])`.
msg = "ascending must be True or False, given {}"
raise ValueError(msg.format(ascending))
if not isrecord(child.dshape.measure):
if key is None or isinstance(key, _strtypes):
# Handle this case separately.
return Sort(child, None, ascending)
msg = "sort key {!r} not valid for schema {!r}"
raise ValueError(msg.format(key, child.dshape.measure))
if key is None and isrecord(child.dshape.measure):
key = child.dshape.measure.names
if isinstance(key, (list, tuple)):
key = keys_to_validate = tuple(key)
else:
keys_to_validate = (key,)
for k in keys_to_validate:
if k is None:
msg = "sort key {!r} not valid for schema {!r}"
raise ValueError(msg.format(k, child.dshape.measure))
elif isinstance(k, _strtypes):
if k not in child.dshape.measure.names:
msg = "sort key {} is not a column of schema {}"
raise ValueError(msg.format(k, child.dshape.measure))
elif not isinstance(k, Expr):
msg = "sort key {} is not a string column name or an expression."
raise ValueError(msg.format(k))
return Sort(child, key, ascending)
class Distinct(Expr):
""" Remove duplicate elements from an expression
Parameters
----------
on : tuple of :class:`~blaze.expr.expressions.Field`
The subset of fields or names of fields to be distinct on.
Examples
--------
>>> from blaze import symbol
>>> t = symbol('t', 'var * {name: string, amount: int, id: int}')
>>> e = distinct(t)
>>> data = [('Alice', 100, 1),
... ('Bob', 200, 2),
... ('Alice', 100, 1)]
>>> from blaze.compute.python import compute
>>> sorted(compute(e, data))
[('Alice', 100, 1), ('Bob', 200, 2)]
Use a subset by passing `on`:
>>> import pandas as pd
>>> e = distinct(t, 'name')
>>> data = pd.DataFrame([['Alice', 100, 1],
... ['Alice', 200, 2],
... ['Bob', 100, 1],
... ['Bob', 200, 2]],
... columns=['name', 'amount', 'id'])
>>> compute(e, data)
name amount id
0 Alice 100 1
1 Bob 100 1
"""
_arguments = '_child', 'on'
def _dshape(self):
return var * self._child.dshape.measure
@property
def fields(self):
return self._child.fields
@property
def _name(self):
return self._child._name
def __str__(self):
return 'distinct({child}{on})'.format(
child=self._child,
on=(', ' if self.on else '') + ', '.join(map(str, self.on))
)
@copydoc(Distinct)
def distinct(expr, *on):
fields = frozenset(expr.fields)
_on = []
append = _on.append
for n in on:
if isinstance(n, Field):
if n._child.isidentical(expr):
n = n._name
else:
raise ValueError('{0} is not a field of {1}'.format(n, expr))
if not isinstance(n, _strtypes):
raise TypeError('on must be a name or field, not: {0}'.format(n))
elif n not in fields:
raise ValueError('{0} is not a field of {1}'.format(n, expr))
append(n)
return Distinct(expr, tuple(_on))
class _HeadOrTail(Expr):
_arguments = '_child', 'n'
def _dshape(self):
return self.n * self._child.dshape.subshape[0]
def _len(self):
return min(self._child._len(), self.n)
@property
def _name(self):
return self._child._name
def __str__(self):
return '%s.%s(%d)' % (self._child, type(self).__name__.lower(), self.n)
class Head(_HeadOrTail):
""" First `n` elements of collection
Examples
--------
>>> from blaze import symbol
>>> accounts = symbol('accounts', 'var * {name: string, amount: int}')
>>> accounts.head(5).dshape
dshape("5 * {name: string, amount: int32}")
See Also
--------
blaze.expr.collections.Tail
"""
pass
@copydoc(Head)
def head(child, n=10):
return Head(child, n)
class Tail(_HeadOrTail):
""" Last `n` elements of collection
Examples
--------
>>> from blaze import symbol
>>> accounts = symbol('accounts', 'var * {name: string, amount: int}')
>>> accounts.tail(5).dshape
dshape("5 * {name: string, amount: int32}")
See Also
--------
blaze.expr.collections.Head
"""
pass
@copydoc(Tail)
def tail(child, n=10):
return Tail(child, n)
class Sample(Expr):
"""Random row-wise sample. Can specify `n` or `frac` for an absolute or
fractional number of rows, respectively.
Examples
--------
>>> from blaze import symbol
>>> accounts = symbol('accounts', 'var * {name: string, amount: int}')
>>> accounts.sample(n=2).dshape
dshape("var * {name: string, amount: int32}")
>>> accounts.sample(frac=0.1).dshape
dshape("var * {name: string, amount: int32}")
"""
_arguments = '_child', 'n', 'frac'
def _dshape(self):
return self._child.dshape
def __str__(self):
arg = 'n={}'.format(self.n) if self.n is not None else 'frac={}'.format(self.frac)
return '%s.sample(%s)' % (self._child, arg)
@copydoc(Sample)
def sample(child, n=None, frac=None):
if n is frac is None:
raise TypeError("sample() missing 1 required argument, 'n' or 'frac'.")
if n is not None and frac is not None:
raise ValueError("n ({}) and frac ({}) cannot both be specified.".format(n, frac))
if n is not None:
n = op.index(n)
if n < 1:
raise ValueError("n must be positive, given {}".format(n))
if frac is not None:
frac = float(frac)
if not 0.0 <= frac <= 1.0:
raise ValueError("sample requires 0 <= frac <= 1.0, given {}".format(frac))
return Sample(child, n, frac)
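# Usage note (illustrative): accounts.sample(n=5) and accounts.sample(frac=0.25)
# are both valid; supplying neither raises TypeError and supplying both raises
# ValueError, as enforced above.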
def schema_concat(exprs):
""" Concatenate schemas together. Supporting both Records and Units
In the case of Units, the name is taken from expr.name
"""
new_fields = []
for c in exprs:
schema = c.schema[0]
if isinstance(schema, Record):
new_fields.extend(schema.fields)
elif isinstance(schema, (Unit, Option)):
new_fields.append((c._name, schema))
else:
raise TypeError("All schemas must have Record or Unit shape."
"\nGot %s" % schema)
return dshape(Record(new_fields))
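# Illustrative sketch (assumes blaze symbols as in the docstrings above;
# results shown roughly):
#
#     # a = symbol('a', 'var * {x: int}')
#     # b = symbol('b', 'var * {y: real}')
#     # schema_concat([a, b])   # -> dshape("{x: int32, y: float64}")
#
# Record schemas contribute all of their fields; Unit/Option schemas
# contribute a single (name, type) pair named after the expression.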
class Merge(ElemWise):
""" Merge many fields together
Parameters
----------
*labeled_exprs : iterable[Expr]
The positional expressions to merge. These will use the expression's
_name as the key in the resulting table.
**named_exprs : dict[str, Expr]
The named expressions to label and merge into the table.
Examples
--------
>>> from blaze import symbol, label
>>> accounts = symbol('accounts', 'var * {name: string, x: int, y: real}')
>>> merge(accounts.name, z=accounts.x + accounts.y).fields
['name', 'z']
Notes
-----
To control the ordering of the fields, use ``label``:
>>> merge(label(accounts.name, 'NAME'), label(accounts.x, 'X')).dshape
dshape("var * {NAME: string, X: int32}")
>>> merge(label(accounts.x, 'X'), label(accounts.name, 'NAME')).dshape
dshape("var * {X: int32, NAME: string}")
See Also
--------
:class:`~blaze.expr.expressions.label`
"""
_arguments = 'args', '_varargsexpr', '_shape'
_input_attributes = '_varargsexpr',
def _dshape(self):
return DataShape(*self._shape + (schema_concat(self.args),))
@property
def fields(self):
return list(tconcat(arg.fields for arg in self.args))
def _get_field(self, key):
for arg in self.args:
if key in arg.fields:
if isscalar(arg.dshape.measure):
return arg
else:
return arg[key]
def _project(self, key):
if not isinstance(key, (tuple, list)):
raise TypeError("Expected tuple or list, got %s" % key)
return merge(*(self[c] for c in key))
def _select(self, predicate):
subexpr = common_subexpression(predicate, *self.args)
return self._subs({subexpr: Selection(subexpr, predicate)})
@attribute
def _child(self):
return _setattr(
self,
'_common_subexpr',
common_subexpression(*self.args),
)
def _wrap(ob, name):
"""Wrap an object in an interactive expression if it is not already
an object, otherwise return it unchanged.
Parameters
----------
ob : any
The object to potentially wrap.
name : str
The name of the interactive expression if created.
Returns
-------
maybe_wrapped : Expr
A blaze expression.
"""
return data(ob, name=name) if not isinstance(ob, Expr) else ob
@copydoc(Merge)
def merge(*exprs, **kwargs):
if len(exprs) + len(kwargs) == 1:
# we only have one object so don't need to construct a merge
if exprs:
# we only have a positional argument, return it unchanged
return exprs[0]
if kwargs:
# we only have a single keyword argument, label it and return it
[(k, v)] = kwargs.items()
return v.label(k)
# label all the kwargs and sort in key order
exprs = tuple(concatv(
(_wrap(expr, '_%s' % n) for n, expr in enumerate(exprs)),
(
label(_wrap(v, k), k)
for k, v in sorted(kwargs.items(), key=first)
),
))
if all(ndim(expr) == 0 for expr in exprs):
raise TypeError('cannot merge all scalar expressions')
result = Merge(
exprs,
varargsexpr(exprs),
maxshape(map(shape, exprs)),
)
if not isdistinct(result.fields):
raise ValueError(
"Repeated columns found: " + ', '.join(
k for k, v in frequencies(result.fields).items() if v > 1
),
)
return result
def transform(expr, replace=True, **kwargs):
"""Add named columns to table
Parameters
----------
expr : Expr
A tabular expression.
replace : bool, optional
Should new columns be allowed to replace old columns?
**kwargs
The new columns to add to the table
Returns
-------
merged : Merge
A new tabular expression with the new columns merged into the table.
Examples
--------
>>> from blaze import symbol
>>> t = symbol('t', 'var * {x: int, y: int}')
>>> transform(t, z=t.x + t.y).fields
['x', 'y', 'z']
See Also
--------
:class:`~blaze.expr.collections.merge`
"""
if replace and set(expr.fields).intersection(set(kwargs)):
expr = expr[[c for c in expr.fields if c not in kwargs]]
args = [expr] + [
_wrap(v, k).label(k) for k, v in sorted(kwargs.items(), key=first)
]
return merge(*args)
def unpack(l):
""" Unpack items from collections of nelements 1
>>> unpack('hello')
'hello'
>>> unpack(['hello'])
'hello'
"""
if isinstance(l, (tuple, list, set)) and len(l) == 1:
return next(iter(l))
else:
return l
class Join(Expr):
""" Join two tables on common columns
Parameters
----------
lhs, rhs : Expr
Expressions to join
on_left : str, optional
The fields from the left side to join on.
If no ``on_right`` is passed, then these are the fields for both
sides.
on_right : str, optional
The fields from the right side to join on.
how : {'inner', 'outer', 'left', 'right'}
What type of join to perform.
suffixes: pair of str
The suffixes to be applied to the left and right sides
in order to resolve duplicate field names.
Examples
--------
>>> from blaze import symbol
>>> names = symbol('names', 'var * {name: string, id: int}')
>>> amounts = symbol('amounts', 'var * {amount: int, id: int}')
Join tables based on shared column name
>>> joined = join(names, amounts, 'id')
Join based on different column names
>>> amounts = symbol('amounts', 'var * {amount: int, acctNumber: int}')
>>> joined = join(names, amounts, 'id', 'acctNumber')
See Also
--------
blaze.expr.collections.Merge
"""
_arguments = 'lhs', 'rhs', '_on_left', '_on_right', 'how', 'suffixes'
_input_attributes = 'lhs', 'rhs'
@property
def on_left(self):
on_left = self._on_left
if isinstance(on_left, tuple):
return list(on_left)
return on_left
@property
def on_right(self):
on_right = self._on_right
if isinstance(on_right, tuple):
return list(on_right)
return on_right
def _schema(self):
"""
Examples
--------
>>> from blaze import symbol
>>> t = symbol('t', 'var * {name: string, amount: int}')
>>> s = symbol('t', 'var * {name: string, id: int}')
>>> join(t, s).schema
dshape("{name: string, amount: int32, id: int32}")
>>> join(t, s, how='left').schema
dshape("{name: string, amount: int32, id: ?int32}")
Overlapping but non-joined fields append _left, _right
>>> a = symbol('a', 'var * {x: int, y: int}')
>>> b = symbol('b', 'var * {x: int, y: int}')
>>> join(a, b, 'x').fields
['x', 'y_left', 'y_right']
"""
option = lambda dt: dt if isinstance(dt, Option) else Option(dt)
on_left = self.on_left
if not isinstance(on_left, list):
on_left = on_left,
on_right = self.on_right
if not isinstance(on_right, list):
on_right = on_right,
right_types = keymap(
dict(zip(on_right, on_left)).get,
self.rhs.dshape.measure.dict,
)
joined = (
(name, promote(extract_key(dt), extract_key(right_types[name]), promote_option=False))
for n, (name, dt) in enumerate(filter(
compose(op.contains(on_left), first),
self.lhs.dshape.measure.fields,
))
)
left = [
(name, dt) for name, dt in zip(
self.lhs.fields,
types_of_fields(self.lhs.fields, self.lhs)
) if name not in on_left
]
right = [
(name, dt) for name, dt in zip(
self.rhs.fields,
types_of_fields(self.rhs.fields, self.rhs)
) if name not in on_right
]
# Handle overlapping but non-joined case, e.g.
left_other = set(name for name, dt in left if name not in on_left)
right_other = set(name for name, dt in right if name not in on_right)
overlap = left_other & right_other
left_suffix, right_suffix = self.suffixes
left = ((name + left_suffix if name in overlap else name, dt)
for name, dt in left)
right = ((name + right_suffix if name in overlap else name, dt)
for name, dt in right)
if self.how in ('right', 'outer'):
left = ((name, option(dt)) for name, dt in left)
if self.how in ('left', 'outer'):
right = ((name, option(dt)) for name, dt in right)
return dshape(Record(chain(joined, left, right)))
def _dshape(self):
# TODO: think if this can be generalized
return var * self.schema
def types_of_fields(fields, expr):
""" Get the types of fields in an expression
Examples
--------
>>> from blaze import symbol
>>> expr = symbol('e', 'var * {x: int64, y: float32}')
>>> types_of_fields('y', expr)
ctype("float32")
>>> types_of_fields(['y', 'x'], expr)
(ctype("float32"), ctype("int64"))
>>> types_of_fields('x', expr.x)
ctype("int64")
"""
if isinstance(expr.dshape.measure, Record):
return get(fields, expr.dshape.measure)
else:
if isinstance(fields, (tuple, list, set)):
assert len(fields) == 1
fields, = fields
assert fields == expr._name
return expr.dshape.measure
def extract_key(m):
return m.measure.key if isinstance(m.measure, datashape.coretypes.Map) else m
@copydoc(Join)
def join(lhs, rhs, on_left=None, on_right=None,
how='inner', suffixes=('_left', '_right')):
if not on_left and not on_right:
on_left = on_right = unpack(list(sorted(
set(lhs.fields) & set(rhs.fields),
key=lhs.fields.index)))
if not on_right:
on_right = on_left
if isinstance(on_left, tuple):
on_left = list(on_left)
if isinstance(on_right, tuple):
on_right = list(on_right)
if not on_left or not on_right:
raise ValueError(
"Can not Join. No shared columns between %s and %s" % (lhs, rhs),
)
left_types = listpack(types_of_fields(on_left, lhs))
right_types = listpack(types_of_fields(on_right, rhs))
# Replace map[x, y] with x to resolve foreign keys.
left_types = list(map(extract_key, left_types))
right_types = list(map(extract_key, right_types))
if len(left_types) != len(right_types):
raise ValueError(
'Length of on_left=%d not equal to length of on_right=%d' % (
len(left_types), len(right_types),
),
)
for n, promotion in enumerate(map(partial(promote, promote_option=False),
left_types,
right_types)):
if promotion == object_:
raise TypeError(
'Schemata of joining columns do not match,'
' no promotion found for %s=%s and %s=%s' % (
on_left[n], left_types[n], on_right[n], right_types[n],
),
)
_on_left = tuple(on_left) if isinstance(on_left, list) else on_left
_on_right = (tuple(on_right) if isinstance(on_right, list)
else on_right)
how = how.lower()
if how not in ('inner', 'outer', 'left', 'right'):
raise ValueError("How parameter should be one of "
"\n\tinner, outer, left, right."
"\nGot: %s" % how)
return Join(lhs, rhs, _on_left, _on_right, how, suffixes)
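# Illustrative sketch (not part of the original module): the ``suffixes`` pair
# is only applied to overlapping fields that are not joined on, so one would
# expect, roughly:
#     >>> a = symbol('a', 'var * {x: int, y: int}')   # hypothetical symbols
#     >>> b = symbol('b', 'var * {x: int, y: int}')
#     >>> join(a, b, 'x', suffixes=('_l', '_r')).fields    # assumed output
#     ['x', 'y_l', 'y_r']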
class Concat(Expr):
""" Stack tables on common columns
Parameters
----------
lhs, rhs : Expr
Collections to concatenate
axis : int, optional
The axis to concatenate on.
Examples
--------
>>> from blaze import symbol
Vertically stack tables:
>>> names = symbol('names', '5 * {name: string, id: int32}')
>>> more_names = symbol('more_names', '7 * {name: string, id: int32}')
>>> stacked = concat(names, more_names)
>>> stacked.dshape
dshape("12 * {name: string, id: int32}")
Vertically stack matrices:
>>> mat_a = symbol('a', '3 * 5 * int32')
>>> mat_b = symbol('b', '3 * 5 * int32')
>>> vstacked = concat(mat_a, mat_b, axis=0)
>>> vstacked.dshape
dshape("6 * 5 * int32")
Horizontally stack matrices:
>>> hstacked = concat(mat_a, mat_b, axis=1)
>>> hstacked.dshape
dshape("3 * 10 * int32")
See Also
--------
blaze.expr.collections.Merge
"""
_arguments = 'lhs', 'rhs', 'axis'
_input_attributes = 'lhs', 'rhs'
def _dshape(self):
axis = self.axis
ldshape = self.lhs.dshape
lshape = ldshape.shape
return DataShape(
*(lshape[:axis] + (
_shape_add(lshape[axis], self.rhs.dshape.shape[axis]),
) + lshape[axis + 1:] + (ldshape.measure,))
)
def _shape_add(a, b):
if isinstance(a, Var) or isinstance(b, Var):
return var
return Fixed(a.val + b.val)
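# Illustrative sketch (not part of the original module): _shape_add keeps
# concrete lengths exact and collapses to ``var`` when either side is unknown:
#     _shape_add(Fixed(3), Fixed(4))  # -> Fixed(7)
#     _shape_add(Fixed(3), var)       # -> var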
@copydoc(Concat)
def concat(lhs, rhs, axis=0):
ldshape = lhs.dshape
rdshape = rhs.dshape
if ldshape.measure != rdshape.measure:
raise TypeError(
'Mismatched measures: {l} != {r}'.format(
l=ldshape.measure, r=rdshape.measure
),
)
lshape = ldshape.shape
rshape = rdshape.shape
for n, (a, b) in enumerate(zip_longest(lshape, rshape, fillvalue=None)):
if n != axis and a != b:
raise TypeError(
'Shapes are not equal along axis {n}: {a} != {b}'.format(
n=n, a=a, b=b,
),
)
if axis < 0 or 0 < len(lshape) <= axis:
raise ValueError(
"Invalid axis '{a}', must be in range: [0, {n})".format(
a=axis, n=len(lshape)
),
)
return Concat(lhs, rhs, axis)
class IsIn(ElemWise):
"""Check if an expression contains values from a set.
Return a boolean expression indicating whether another expression
contains values that are members of a collection.
Parameters
----------
expr : Expr
Expression whose elements to check for membership in `keys`
keys : Sequence
Elements to test against. Blaze stores this as a ``frozenset``.
Examples
--------
Check if a vector contains any of 1, 2 or 3:
>>> from blaze import symbol
>>> t = symbol('t', '10 * int64')
>>> expr = t.isin([1, 2, 3])
>>> expr.dshape
dshape("10 * bool")
"""
_arguments = '_child', '_keys'
_input_attributes = '_child', '_keys'
def _schema(self):
return datashape.bool_
@property
def _name(self):
return self._child._name
def __str__(self):
return '%s.%s(%s)' % (self._child, type(self).__name__.lower(),
self._keys)
@copydoc(IsIn)
def isin(expr, keys):
if not isinstance(keys, Expr):
keys = literal(keys)
return IsIn(expr, keys)
class Shift(Expr):
""" Shift a column backward or forward by N elements
Parameters
----------
expr : Expr
The expression to shift. This expression's dshape should be columnar
n : int
The number of elements to shift by. If n < 0 then shift backward,
if n == 0 do nothing, else shift forward.
"""
_arguments = '_child', 'n'
def _schema(self):
measure = self._child.schema.measure
# if we are not shifting or we are already an Option type then return
# the child's schema
if not self.n or isinstance(measure, Option):
return measure
else:
return Option(measure)
def _dshape(self):
return DataShape(*(self._child.dshape.shape + tuple(self.schema)))
def __str__(self):
return '%s(%s, n=%d)' % (
type(self).__name__.lower(), self._child, self.n
)
@copydoc(Shift)
def shift(expr, n):
if not isinstance(n, (numbers.Integral, np.integer)):
raise TypeError('n must be an integer')
return Shift(expr, n)
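# Illustrative sketch (not part of the original module): shifting wraps the
# measure in an Option because the vacated positions become missing, so one
# would expect, roughly:
#     >>> prices = symbol('prices', '10 * float64')   # hypothetical symbol
#     >>> shift(prices, 1).dshape                      # assumed output
#     dshape("10 * ?float64")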
dshape_method_list.extend([(iscollection, set([sort, head, tail, sample])),
(lambda ds: len(ds.shape) == 1, set([distinct, shift])),
(lambda ds: (len(ds.shape) == 1 and
isscalar(getattr(ds.measure, 'key', ds.measure))), set([isin])),
(lambda ds: len(ds.shape) == 1 and isscalar(ds.measure), set([isin]))])
| {
"repo_name": "ContinuumIO/blaze",
"path": "blaze/expr/collections.py",
"copies": "3",
"size": "26662",
"license": "bsd-3-clause",
"hash": -4585575986744477000,
"line_mean": 26.8599791014,
"line_max": 104,
"alpha_frac": 0.5506713675,
"autogenerated": false,
"ratio": 3.7509848058525606,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0002447223985306051,
"num_lines": 957
} |
from __future__ import absolute_import, division, print_function
import numbers
import operator
import numpy as np
from glue.external.six import PY3
from glue.core.roi import CategoricalROI
from glue.core.contracts import contract
from glue.core.util import split_component_view
from glue.core.registry import Registry
from glue.core.exceptions import IncompatibleAttribute
from glue.core.message import SubsetDeleteMessage, SubsetUpdateMessage
from glue.core.decorators import memoize
from glue.core.visual import VisualAttributes
from glue.config import settings
from glue.utils import view_shape, broadcast_to
__all__ = ['Subset', 'SubsetState', 'RoiSubsetState', 'CategoricalROISubsetState',
'RangeSubsetState', 'MultiRangeSubsetState', 'CompositeSubsetState',
'OrState', 'AndState', 'XorState', 'InvertState', 'MaskSubsetState', 'CategorySubsetState',
'ElementSubsetState', 'InequalitySubsetState', 'combine_multiple',
'CategoricalMultiRangeSubsetState', 'CategoricalROISubsetState2D']
OPSYM = {operator.ge: '>=', operator.gt: '>',
operator.le: '<=', operator.lt: '<',
operator.and_: '&', operator.or_: '|',
operator.xor: '^', operator.eq: '==',
operator.ne: '!='}
SYMOP = dict((v, k) for k, v in OPSYM.items())
class Subset(object):
"""Base class to handle subsets of data.
These objects both describe subsets of a dataset, and relay any
state changes to the hub that their parent data are assigned to.
This base class only directly implements the logic that relays
state changes back to the hub. Subclasses implement the actual
description and manipulation of data subsets.
:param data:
The dataset that this subset describes
:type data: :class:`~glue.core.data.Data`
"""
@contract(data='isinstance(Data)|None',
color='color',
alpha=float,
label='string|None')
def __init__(self, data, color=settings.SUBSET_COLORS[0], alpha=0.5, label=None):
""" Create a new subset object.
Note: the preferred way for creating subsets is
via DataCollection.new_subset_group. Manually-instantiated
subsets will probably *not* be represented properly by the UI
"""
self._broadcasting = False # must be first def
self.data = data
self._subset_state = None
self._label = None
self._style = None
self._setup(color, alpha, label)
@contract(color='color', alpha='float', label='string|None')
def _setup(self, color, alpha, label):
self.color = color
self.label = label # trigger disambiguation
self.style = VisualAttributes(parent=self)
self.style.markersize *= 1.5
self.style.color = color
self.style.alpha = alpha
self.subset_state = SubsetState() # calls proper setter method
@property
def subset_state(self):
return self._subset_state
@subset_state.setter
def subset_state(self, state):
if isinstance(state, np.ndarray):
if self.data.shape != state.shape:
raise ValueError("Shape of mask doesn't match shape of data")
cids = self.data.pixel_component_ids
state = MaskSubsetState(state, cids)
if not isinstance(state, SubsetState):
raise TypeError("State must be a SubsetState instance or array")
self._subset_state = state
@property
def style(self):
return self._style
@style.setter
@contract(value=VisualAttributes)
def style(self, value):
value.parent = self
self._style = value
@property
def label(self):
""" Convenience access to subset's label """
return self._label
@label.setter
def label(self, value):
"""Set the subset's label
Subset labels within a data object must be unique. The input
will be auto-disambiguated if necessary
"""
value = Registry().register(self, value, group=self.data)
self._label = value
@property
def attributes(self):
"""
Returns a tuple of the ComponentIDs that this subset
depends upon
"""
return self.subset_state.attributes
def register(self):
""" Register a subset to its data, and start broadcasting
state changes
"""
self.data.add_subset(self)
self.do_broadcast(True)
@contract(returns='array[N]')
def to_index_list(self):
"""
Convert the current subset to a list of indices. These index
the elements in the (flattened) data object that belong to the subset.
If x is the numpy array corresponding to some component.data,
the two following statements are equivalent::
x.flat[subset.to_index_list()]
x[subset.to_mask()]
Returns:
A numpy array, giving the indices of elements in the data that
belong to this subset.
Raises:
IncompatibleDataException: if an index list cannot be created
for the requested data set.
"""
try:
return self.subset_state.to_index_list(self.data)
except IncompatibleAttribute as exc:
try:
return self._to_index_list_join()
except IncompatibleAttribute:
raise exc
def _to_index_list_join(self):
return np.where(self._to_mask_join(None).flat)[0]
def _to_mask_join(self, view):
"""
Convert the subset to a mask through an entity join to another
dataset.
"""
for other, (cid1, cid2) in self.data._key_joins.items():
if getattr(other, '_recursing', False):
continue
try:
self.data._recursing = True
s2 = Subset(other)
s2.subset_state = self.subset_state
mask_right = s2.to_mask()
except IncompatibleAttribute:
continue
finally:
self.data._recursing = False
if len(cid1) == 1 and len(cid2) == 1:
key_left = self.data[cid1[0], view]
key_right = other[cid2[0], mask_right]
mask = np.in1d(key_left.ravel(), key_right.ravel())
return mask.reshape(key_left.shape)
elif len(cid1) == len(cid2):
key_left_all = []
key_right_all = []
for cid1_i, cid2_i in zip(cid1, cid2):
key_left_all.append(self.data[cid1_i, view].ravel())
key_right_all.append(other[cid2_i, mask_right].ravel())
# TODO: The following is slow because we are looping in Python.
# This could be made significantly faster by switching to
# C/Cython.
key_left_all = zip(*key_left_all)
key_right_all = set(zip(*key_right_all))
result = [key in key_right_all for key in key_left_all]
result = np.array(result)
return result.reshape(self.data[cid1_i, view].shape)
elif len(cid1) == 1:
key_left = self.data[cid1[0], view].ravel()
mask = np.zeros_like(key_left, dtype=bool)
for cid2_i in cid2:
key_right = other[cid2_i, mask_right].ravel()
mask |= np.in1d(key_left, key_right)
return mask.reshape(self.data[cid1[0], view].shape)
elif len(cid2) == 1:
key_right = other[cid2[0], mask_right].ravel()
mask = np.zeros_like(self.data[cid1[0], view].ravel(), dtype=bool)
for cid1_i in cid1:
key_left = self.data[cid1_i, view].ravel()
mask |= np.in1d(key_left, key_right)
return mask.reshape(self.data[cid1[0], view].shape)
else:
raise Exception("Either the number of components in the key join sets "
"should match, or one of the component sets should ",
"contain a single component.")
raise IncompatibleAttribute
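# Illustrative aside (not part of the original source): the key-join branches
# above are essentially membership tests on the join keys, e.g. with plain numpy:
#     >>> np.in1d([1, 2, 3, 4], [2, 4])
#     array([False,  True, False,  True])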
@contract(view='array_view', returns='array')
def to_mask(self, view=None):
"""
Convert the current subset to a mask.
:param view: An optional view into the dataset (e.g. a slice)
If present, the mask will pertain to the view and not the
entire dataset.
Returns:
A boolean numpy array, the same shape as the data, that
defines whether each element belongs to the subset.
"""
try:
return self.subset_state.to_mask(self.data, view)
except IncompatibleAttribute as exc:
try:
return self._to_mask_join(view)
except IncompatibleAttribute:
raise exc
@contract(value=bool)
def do_broadcast(self, value):
"""
Set whether state changes to the subset are relayed to a hub.
It can be useful to turn off broadcasting, when modifying the
subset in ways that don't impact any of the clients.
Attributes:
value: Whether the subset should broadcast state changes (True/False)
"""
object.__setattr__(self, '_broadcasting', value)
@contract(attribute='string')
def broadcast(self, attribute):
"""
Explicitly broadcast a SubsetUpdateMessage to the hub
:param attribute:
The name of the attribute (if any) that should be
broadcast as updated.
:type attribute: ``str``
"""
if not hasattr(self, 'data') or not hasattr(self.data, 'hub'):
return
if self._broadcasting and self.data.hub:
msg = SubsetUpdateMessage(self, attribute=attribute)
self.data.hub.broadcast(msg)
def delete(self):
"""Broadcast a SubsetDeleteMessage to the hub, and stop broadcasting
Also removes subset reference from parent data's subsets list
"""
dobroad = self._broadcasting and self.data is not None and \
self.data.hub is not None
self.do_broadcast(False)
if self.data is not None and self in self.data.subsets:
self.data._subsets.remove(self)
if dobroad:
msg = SubsetDeleteMessage(self)
self.data.hub.broadcast(msg)
Registry().unregister(self, group=self.data)
@contract(file_name='string')
def write_mask(self, file_name, format="fits"):
""" Write a subset mask out to file
:param file_name: name of file to write to
:param format:
Name of format to write to. Currently, only "fits" is
supported
"""
mask = np.short(self.to_mask())
if format == 'fits':
try:
from astropy.io import fits
fits.writeto(file_name, mask, clobber=True)
except ImportError:
raise ImportError("Cannot write mask -- requires astropy")
else:
raise AttributeError("format not supported: %s" % format)
@contract(file_name='string')
def read_mask(self, file_name):
try:
from astropy.io import fits
mask = fits.open(file_name)[0].data
except ImportError:
raise ImportError("Cannot read mask -- requires astropy")
except IOError:
raise IOError("Could not read %s (not a fits file?)" % file_name)
ind = np.where(mask.flat)[0]
state = ElementSubsetState(indices=ind)
self.subset_state = state
def __del__(self):
self.delete()
def __setattr__(self, attribute, value):
object.__setattr__(self, attribute, value)
if not attribute.startswith('_'):
self.broadcast(attribute)
def __getitem__(self, view):
""" Retrieve the elements from a data view within the subset
:param view: View of the data. See data.__getitem__ for details
"""
c, v = split_component_view(view)
ma = self.to_mask(v)
return self.data[view][ma]
@contract(other_subset='isinstance(Subset)')
def paste(self, other_subset):
"""paste subset state from other_subset onto self """
state = other_subset.subset_state.copy()
self.subset_state = state
def __str__(self):
dlabel = "(no data)"
if self.data is not None:
dlabel = "(data: %s)" % self.data.label
slabel = "Subset: (no label)"
if self.label:
slabel = "Subset: %s" % self.label
return "%s %s" % (slabel, dlabel)
def __repr__(self):
return self.__str__()
@contract(other='isinstance(Subset)', returns='isinstance(Subset)')
def __or__(self, other):
return _combine([self, other], operator.or_)
@contract(other='isinstance(Subset)', returns='isinstance(Subset)')
def __and__(self, other):
return _combine([self, other], operator.and_)
@contract(returns='isinstance(Subset)')
def __invert__(self):
return _combine([self], operator.invert)
@contract(other='isinstance(Subset)', returns='isinstance(Subset)')
def __xor__(self, other):
return _combine([self, other], operator.xor)
def __eq__(self, other):
if not isinstance(other, Subset):
return False
# XXX need to add equality specification for subset states
return (self.subset_state == other.subset_state and
self.style == other.style)
def state_as_mask(self):
"""
Convert the current SubsetState to a MaskSubsetState
"""
try:
m = self.to_mask()
except IncompatibleAttribute:
m = np.zeros(self.data.shape, dtype=np.bool)
cids = self.data.pixel_component_ids
return MaskSubsetState(m, cids)
# In Python 2 we need to do this explicitly
def __ne__(self, other):
return not self.__eq__(other)
# In Python 3, if __eq__ is defined, then __hash__ has to be re-defined
if PY3:
__hash__ = object.__hash__
class SubsetState(object):
def __init__(self):
pass
@property
def attributes(self):
return tuple()
@property
def subset_state(self): # convenience method, mimic interface of Subset
return self
@contract(data='isinstance(Data)')
def to_index_list(self, data):
return np.where(self.to_mask(data).flat)[0]
@contract(data='isinstance(Data)', view='array_view')
def to_mask(self, data, view=None):
shp = view_shape(data.shape, view)
return np.zeros(shp, dtype=bool)
@contract(returns='isinstance(SubsetState)')
def copy(self):
return SubsetState()
@contract(other_state='isinstance(SubsetState)',
returns='isinstance(SubsetState)')
def __or__(self, other_state):
return OrState(self, other_state)
@contract(other_state='isinstance(SubsetState)',
returns='isinstance(SubsetState)')
def __and__(self, other_state):
return AndState(self, other_state)
@contract(returns='isinstance(SubsetState)')
def __invert__(self):
return InvertState(self)
@contract(other_state='isinstance(SubsetState)',
returns='isinstance(SubsetState)')
def __xor__(self, other_state):
return XorState(self, other_state)
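# Illustrative sketch (not part of the original source): the operator overloads
# above compose lazily into CompositeSubsetState trees, e.g.
#     combined = RangeSubsetState(0, 1, att) | ~RangeSubsetState(5, 6, att)
# where ``att`` is a hypothetical ComponentID; evaluating combined.to_mask(data)
# then ORs the first mask with the inverse of the second.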
class RoiSubsetState(SubsetState):
def __init__(self, xatt=None, yatt=None, roi=None):
super(RoiSubsetState, self).__init__()
self.xatt = xatt
self.yatt = yatt
self.roi = roi
@property
def attributes(self):
return (self.xatt, self.yatt)
@contract(data='isinstance(Data)', view='array_view')
def to_mask(self, data, view=None):
# TODO: make sure that pixel components don't actually take up much
# memory and are just views
x = data[self.xatt, view]
y = data[self.yatt, view]
if (x.ndim == data.ndim and
self.xatt in data.pixel_component_ids and
self.yatt in data.pixel_component_ids):
# This is a special case - the ROI is defined in pixel space, so we
# can apply it to a single slice and then broadcast it to all other
# dimensions. We start off by extracting a slice which takes only
# the first elements of all dimensions except the attributes in
# question, for which we take all the elements. We need to preserve
# the dimensionality of the array, hence the use of slice(0, 1).
# Note that we can only do this if the view (if present) preserved
# the dimensionality, which is why we checked that x.ndim == data.ndim
subset = []
for i in range(data.ndim):
if i == self.xatt.axis or i == self.yatt.axis:
subset.append(slice(None))
else:
subset.append(slice(0, 1))
x_slice = x[subset]
y_slice = y[subset]
if self.roi.defined():
result = self.roi.contains(x_slice, y_slice)
else:
result = np.zeros(x_slice.shape, dtype=bool)
result = broadcast_to(result, x.shape)
else:
if self.roi.defined():
result = self.roi.contains(x, y)
else:
result = np.zeros(x.shape, dtype=bool)
if result.shape != x.shape:
raise ValueError("Unexpected error: boolean mask has incorrect dimensions")
return result
def copy(self):
result = RoiSubsetState()
result.xatt = self.xatt
result.yatt = self.yatt
result.roi = self.roi
return result
class CategoricalROISubsetState(SubsetState):
def __init__(self, att=None, roi=None):
super(CategoricalROISubsetState, self).__init__()
self.att = att
self.roi = roi
@property
def attributes(self):
return self.att,
@memoize
@contract(data='isinstance(Data)', view='array_view')
def to_mask(self, data, view=None):
x = data.get_component(self.att)._categorical_data[view]
result = self.roi.contains(x, None)
assert x.shape == result.shape
return result.ravel()
def copy(self):
result = CategoricalROISubsetState()
result.att = self.att
result.roi = self.roi
return result
@staticmethod
def from_range(component, att, lo, hi):
roi = CategoricalROI.from_range(component, lo, hi)
subset = CategoricalROISubsetState(roi=roi,
att=att)
return subset
class RangeSubsetState(SubsetState):
def __init__(self, lo, hi, att=None):
super(RangeSubsetState, self).__init__()
self.lo = lo
self.hi = hi
self.att = att
@property
def attributes(self):
return (self.att,)
@contract(data='isinstance(Data)', view='array_view')
def to_mask(self, data, view=None):
x = data[self.att, view]
result = (x >= self.lo) & (x <= self.hi)
return result
def copy(self):
return RangeSubsetState(self.lo, self.hi, self.att)
class MultiRangeSubsetState(SubsetState):
"""
A subset state defined by multiple discontinuous ranges
Parameters
----------
pairs : list
A list of (lo, hi) tuples
"""
def __init__(self, pairs, att=None):
super(MultiRangeSubsetState, self).__init__()
self.pairs = pairs
self.att = att
@property
def attributes(self):
return (self.att,)
@contract(data='isinstance(Data)', view='array_view')
def to_mask(self, data, view=None):
x = data[self.att, view]
result = np.zeros_like(x, dtype=bool)
for lo, hi in self.pairs:
result |= (x >= lo) & (x <= hi)
return result
def copy(self):
return MultiRangeSubsetState(self.pairs, self.att)
class CategoricalROISubsetState2D(object):
"""
A 2D subset state where both attributes are categorical.
Parameters
----------
categories : dict
A dictionary containing, for each label of one categorical component, an
iterable of labels for the other categorical component (using sets will
provide the best performance).
att1 : :class:`~glue.core.component_id.ComponentID`
The component ID matching the keys of the ``categories`` dictionary
att2 : :class:`~glue.core.component_id.ComponentID`
The component ID matching the values of the ``categories`` dictionary
"""
def __init__(self, categories, att1, att2):
self.categories = categories
self.att1 = att1
self.att2 = att2
@property
def attributes(self):
return (self.att1, self.att2)
@memoize
@contract(data='isinstance(Data)', view='array_view')
def to_mask(self, data, view=None):
# Extract categories and numerical values
labels1 = data.get_component(self.att1).labels
labels2 = data.get_component(self.att2).labels
if view is not None:
labels1 = labels1[view]
labels2 = labels2[view]
# Initialize empty mask
mask = np.zeros(labels1.shape, dtype=bool)
# A loop over all values here is actually reasonably efficient compared
# to alternatives. Any improved implementation, even vectorized, should
# ensure that it is more efficient for large numbers of categories and
# values.
for i in range(len(labels1)):
if labels1[i] in self.categories:
if labels2[i] in self.categories[labels1[i]]:
mask[i] = True
return mask
def copy(self):
result = CategoricalROISubsetState2D(self.categories,
self.att1, self.att2)
return result
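# Illustrative sketch (not part of the original source): with
#     categories = {'a': {'x', 'y'}, 'b': {'z'}}
# a row is selected exactly when its (att1, att2) labels form one of the pairs
# ('a', 'x'), ('a', 'y') or ('b', 'z'), which is what the loop in to_mask checks.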
class CategoricalMultiRangeSubsetState(SubsetState):
"""
A 2D subset state where one attribute is categorical and the other is
numerical, and where for each category, there are multiple possible subset
ranges.
Parameters
----------
ranges : dict
A dictionary containing for each category (key), a list of tuples
giving the ranges of values for the numerical attribute.
cat_att : :class:`~glue.core.component_id.ComponentID`
The component ID for the categorical attribute
num_att : :class:`~glue.core.component_id.ComponentID`
The component ID for the numerical attribute
"""
def __init__(self, ranges, cat_att, num_att):
self.ranges = ranges
self.cat_att = cat_att
self.num_att = num_att
@property
def attributes(self):
return (self.cat_att, self.num_att)
@memoize
@contract(data='isinstance(Data)', view='array_view')
def to_mask(self, data, view=None):
# Extract categories and numerical values
labels = data.get_component(self.cat_att).labels
values = data[self.num_att]
if view is not None:
labels = labels[view]
values = values[view]
# Initialize empty mask
mask = np.zeros(values.shape, dtype=bool)
# A loop over all values here is actually reasonably efficient compared
# to alternatives. Any improved implementation, even vectorized, should
# ensure that it is more efficient for large numbers of categories and
# values. For example, using 10000 categories and 1000000 data points
# takes 1.2 seconds on a laptop.
for i in range(len(values)):
if labels[i] in self.ranges:
for lo, hi in self.ranges[labels[i]]:
if values[i] >= lo and values[i] <= hi:
mask[i] = True
break
return mask
def copy(self):
result = CategoricalMultiRangeSubsetState(self.ranges,
self.cat_att,
self.num_att)
return result
class CompositeSubsetState(SubsetState):
op = None
def __init__(self, state1, state2=None):
super(CompositeSubsetState, self).__init__()
self.state1 = state1.copy()
if state2:
state2 = state2.copy()
self.state2 = state2
def copy(self):
return type(self)(self.state1, self.state2)
@property
def attributes(self):
att = self.state1.attributes
if self.state2 is not None:
att += self.state2.attributes
return tuple(sorted(set(att)))
@memoize
@contract(data='isinstance(Data)', view='array_view')
def to_mask(self, data, view=None):
return self.op(self.state1.to_mask(data, view),
self.state2.to_mask(data, view))
def __str__(self):
sym = OPSYM.get(self.op, self.op)
return "(%s %s %s)" % (self.state1, sym, self.state2)
class OrState(CompositeSubsetState):
op = operator.or_
class AndState(CompositeSubsetState):
op = operator.and_
class XorState(CompositeSubsetState):
op = operator.xor
class InvertState(CompositeSubsetState):
@memoize
@contract(data='isinstance(Data)', view='array_view')
def to_mask(self, data, view=None):
return ~self.state1.to_mask(data, view)
def __str__(self):
return "(~%s)" % self.state1
class MaskSubsetState(SubsetState):
"""
A subset defined by boolean pixel mask
"""
def __init__(self, mask, cids):
"""
:param cids: List of ComponentIDs, defining the pixel coordinate space of the mask
:param mask: Boolean ndarray
"""
self.cids = cids
self.mask = mask
def copy(self):
return MaskSubsetState(self.mask, self.cids)
def to_mask(self, data, view=None):
view = view or slice(None)
# shortcut for data on the same pixel grid
if data.pixel_component_ids == self.cids:
return self.mask[view].copy()
# locate each element of data in the coordinate system of the mask
vals = [data[c, view].astype(np.int) for c in self.cids]
result = self.mask[vals]
for v, n in zip(vals, data.shape):
result &= ((v >= 0) & (v < n))
return result
def __gluestate__(self, context):
return dict(cids=[context.id(c) for c in self.cids],
mask=context.do(self.mask))
@classmethod
def __setgluestate__(cls, rec, context):
return cls(context.object(rec['mask']),
[context.object(c) for c in rec['cids']])
class CategorySubsetState(SubsetState):
def __init__(self, attribute, values):
super(CategorySubsetState, self).__init__()
self._attribute = attribute
self._values = np.asarray(values).ravel()
@memoize
def to_mask(self, data, view=None):
vals = data[self._attribute, view]
result = np.in1d(vals.ravel(), self._values)
return result.reshape(vals.shape)
def copy(self):
return CategorySubsetState(self._attribute, self._values.copy())
def __gluestate__(self, context):
return dict(att=context.id(self._attribute),
vals=context.do(self._values))
@classmethod
def __setgluestate__(cls, rec, context):
return cls(context.object(rec['att']),
context.object(rec['vals']))
class ElementSubsetState(SubsetState):
def __init__(self, indices=None, data=None):
super(ElementSubsetState, self).__init__()
self._indices = indices
if data is None:
self._data_uuid = None
else:
self._data_uuid = data.uuid
@memoize
def to_mask(self, data, view=None):
if data.uuid == self._data_uuid or self._data_uuid is None:
# XXX this is inefficient for views
result = np.zeros(data.shape, dtype=bool)
if self._indices is not None:
try:
result.flat[self._indices] = True
except IndexError:
if self._data_uuid is None:
raise IncompatibleAttribute()
else:
raise
if view is not None:
result = result[view]
return result
else:
raise IncompatibleAttribute()
def copy(self):
state = ElementSubsetState(indices=self._indices)
state._data_uuid = self._data_uuid
return state
def __gluestate__(self, context):
return dict(indices=context.do(self._indices),
data_uuid=self._data_uuid)
@classmethod
def __setgluestate__(cls, rec, context):
state = cls(indices=context.object(rec['indices']))
try:
state._data_uuid = rec['data_uuid']
except KeyError: # BACKCOMPAT
pass
return state
class InequalitySubsetState(SubsetState):
def __init__(self, left, right, op):
from glue.core.component_link import ComponentLink
super(InequalitySubsetState, self).__init__()
from glue.core.data import ComponentID
valid_ops = [operator.gt, operator.ge,
operator.lt, operator.le,
operator.eq, operator.ne]
if op not in valid_ops:
raise TypeError("Invalid boolean operator: %s" % op)
if not isinstance(left, ComponentID) and not \
isinstance(left, numbers.Number) and not \
isinstance(left, ComponentLink):
raise TypeError("Input must be ComponenID or NumberType: %s"
% type(left))
if not isinstance(right, ComponentID) and not \
isinstance(right, numbers.Number) and not \
isinstance(right, ComponentLink):
raise TypeError("Input must be ComponenID or NumberType: %s"
% type(right))
self._left = left
self._right = right
self._operator = op
@property
def left(self):
return self._left
@property
def right(self):
return self._right
@property
def operator(self):
return self._operator
@memoize
def to_mask(self, data, view=None):
left = self._left
if not isinstance(self._left, numbers.Number):
left = data[self._left, view]
right = self._right
if not isinstance(self._right, numbers.Number):
right = data[self._right, view]
return self._operator(left, right)
def copy(self):
return InequalitySubsetState(self._left, self._right, self._operator)
def __str__(self):
sym = OPSYM.get(self._operator, self._operator)
return "(%s %s %s)" % (self._left, sym, self._right)
def __repr__(self):
return '<%s: %s>' % (self.__class__.__name__, self)
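# Illustrative sketch (not part of the original source): an inequality state is
# built from a ComponentID (or ComponentLink, or a plain number), a value and a
# comparison operator, e.g.
#     state = InequalitySubsetState(cid, 3.5, operator.gt)   # ``cid`` is hypothetical
#     str(state)   # -> roughly '(cid > 3.5)', using the OPSYM table above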
@contract(subsets='list(isinstance(Subset))', returns=Subset)
def _combine(subsets, operator):
state = operator(*[s.subset_state for s in subsets])
result = Subset(None)
result.subset_state = state
return result
def combine_multiple(subsets, operator):
if len(subsets) == 0:
return SubsetState()
else:
combined = subsets[0]
for subset in subsets[1:]:
combined = operator(combined, subset)
return combined
| {
"repo_name": "saimn/glue",
"path": "glue/core/subset.py",
"copies": "1",
"size": "31839",
"license": "bsd-3-clause",
"hash": -3877208879731483600,
"line_mean": 30.8071928072,
"line_max": 102,
"alpha_frac": 0.585728195,
"autogenerated": false,
"ratio": 4.1408505657432695,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00016126762149574895,
"num_lines": 1001
} |
from __future__ import absolute_import, division, print_function
import numbers
import os
import re
import subprocess
import sys
import decimal
import warnings
from functools import partial
from operator import attrgetter
from itertools import chain
from collections import Iterator
from datetime import datetime, date, timedelta
from distutils.spawn import find_executable
import numpy as np
import pandas as pd
import sqlalchemy as sa
from sqlalchemy import inspect
from sqlalchemy.ext.compiler import compiles
from sqlalchemy import event
from sqlalchemy.schema import CreateSchema
from sqlalchemy.dialects import mssql, postgresql
from multipledispatch import MDNotImplementedError
import datashape
from datashape.dispatch import dispatch
from datashape.predicates import isdimension, isrecord, isscalar, isdatelike
from datashape import (
DataShape, Record, Option, var, dshape, Map, discover,
datetime_, date_, float64, int64, int_, string, bytes_, float32,
)
from toolz import (partition_all, keyfilter, valfilter, identity, concat,
curry, merge, memoize)
from toolz.curried import pluck, map
from ..compatibility import unicode, StringIO
from ..directory import Directory
from ..utils import (
keywords,
ignoring,
iter_except,
filter_kwargs,
literal_compile,
)
from ..convert import convert, ooc_types
from ..append import append
from ..resource import resource
from ..chunks import Chunks
from .csv import CSV
base = int, float, datetime, date, bool, str, decimal.Decimal, timedelta
# http://docs.sqlalchemy.org/en/latest/core/types.html
types = {
'int64': sa.BigInteger,
'int32': sa.Integer,
'int': sa.Integer,
'int16': sa.SmallInteger,
'float32': sa.REAL,
'float64': sa.FLOAT,
'float': sa.FLOAT,
'real': sa.FLOAT,
'string': sa.Text,
'date': sa.Date,
'time': sa.Time,
'datetime': sa.DateTime,
'bool': sa.Boolean,
"timedelta[unit='D']": sa.Interval(second_precision=0, day_precision=9),
"timedelta[unit='h']": sa.Interval(second_precision=0, day_precision=0),
"timedelta[unit='m']": sa.Interval(second_precision=0, day_precision=0),
"timedelta[unit='s']": sa.Interval(second_precision=0, day_precision=0),
"timedelta[unit='ms']": sa.Interval(second_precision=3, day_precision=0),
"timedelta[unit='us']": sa.Interval(second_precision=6, day_precision=0),
"timedelta[unit='ns']": sa.Interval(second_precision=9, day_precision=0),
# ??: sa.types.LargeBinary,
}
revtypes = dict(map(reversed, types.items()))
# Subclass mssql.TIMESTAMP subclass for use when differentiating between
# mssql.TIMESTAMP and sa.TIMESTAMP.
# At the time of this writing, (mssql.TIMESTAMP == sa.TIMESTAMP) is True,
# which causes a collision when defining the revtypes mappings.
#
# See:
# https://bitbucket.org/zzzeek/sqlalchemy/issues/4092/type-problem-with-mssqltimestamp
class MSSQLTimestamp(mssql.TIMESTAMP):
pass
# Assign the custom subclass as the type to use instead of `mssql.TIMESTAMP`.
mssql.base.ischema_names['TIMESTAMP'] = MSSQLTimestamp
revtypes.update({
sa.DATETIME: datetime_,
sa.TIMESTAMP: datetime_,
sa.FLOAT: float64,
sa.DATE: date_,
sa.BIGINT: int64,
sa.INTEGER: int_,
sa.types.NullType: string,
sa.REAL: float32,
sa.Float: float64,
mssql.BIT: datashape.bool_,
mssql.DATETIMEOFFSET: string,
mssql.MONEY: float64,
mssql.SMALLMONEY: float32,
mssql.UNIQUEIDENTIFIER: string,
# The SQL Server TIMESTAMP value doesn't correspond to the ISO Standard
# It is instead just a binary(8) value with no relation to dates or times
MSSQLTimestamp: bytes_,
})
# Types which can be specified on precision.
# These are checked before checking membership in revtypes, because:
# 1) An instance of a precision type does not equal another instance with
#    the same precision.
#    (DOUBLE_PRECISION(precision=53) != DOUBLE_PRECISION(precision=53))
# 2) Precision types can be an instance of a type in revtypes.
# isinstance(sa.Float(precision=53), sa.Float)
precision_types = {
sa.Float,
postgresql.base.DOUBLE_PRECISION
}
def precision_to_dtype(precision):
"""
Maps a float or double precision attribute to the desired dtype.
The mappings are as follows:
[1, 24] -> float32
[25, 53] -> float64
Values outside of those ranges raise a ``ValueError``.
Parameters
----------
precision : int
A double or float precision. e.g. the value returned by
`postgresql.base.DOUBLE_PRECISION(precision=53).precision`
Returns
-------
dtype : datashape.dtype (float32|float64)
The dtype to use for columns of the specified precision.
"""
if isinstance(precision, numbers.Integral):
if 1 <= precision <= 24:
return float32
elif 25 <= precision <= 53:
return float64
raise ValueError("{} is not a supported precision".format(precision))
# interval types are special cased in discover_typeengine so remove them from
# revtypes
revtypes = valfilter(lambda x: not isinstance(x, sa.Interval), revtypes)
units_of_power = {
0: 's',
3: 'ms',
6: 'us',
9: 'ns'
}
# these aren't loaded by sqlalchemy by default
sa.dialects.registry.load('oracle')
sa.dialects.registry.load('postgresql')
def getbind(t, bind):
if bind is None:
return t.bind
if isinstance(bind, sa.engine.interfaces.Connectable):
return bind
return create_engine(bind)
def batch(sel, chunksize=10000, bind=None):
"""Execute `sel`, streaming row at a time and fetching from the database in
batches of size `chunksize`.
Parameters
----------
sel : sa.sql.Selectable
Selectable to execute
chunksize : int, optional, default 10000
Number of rows to fetch from the database
"""
def rowiterator(sel, chunksize=chunksize):
with getbind(sel, bind).connect() as conn:
result = conn.execute(sel)
for rows in iter_except(curry(result.fetchmany, size=chunksize),
sa.exc.ResourceClosedError):
if rows:
yield rows
else:
return
columns = [col.name for col in sel.columns]
iterator = rowiterator(sel)
return columns, concat(iterator)
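# Illustrative usage sketch (not part of the original module), assuming an
# existing table ``t`` bound to an engine ``eng``:
#     cols, rows = batch(sa.select([t]), chunksize=10000, bind=eng)
#     # ``cols`` is the list of column names; ``rows`` is a lazy, flattened
#     # stream of result rows fetched 10000 at a time.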
@discover.register(sa.dialects.postgresql.base.INTERVAL)
def discover_postgresql_interval(t):
return discover(sa.Interval(day_precision=0, second_precision=t.precision))
@discover.register(sa.dialects.oracle.base.INTERVAL)
def discover_oracle_interval(t):
return discover(t.adapt(sa.Interval))
@discover.register(sa.sql.type_api.TypeEngine)
def discover_typeengine(typ):
if isinstance(typ, sa.Interval):
if typ.second_precision is None and typ.day_precision is None:
return datashape.TimeDelta(unit='us')
elif typ.second_precision == 0 and typ.day_precision == 0:
return datashape.TimeDelta(unit='s')
if typ.second_precision in units_of_power and not typ.day_precision:
units = units_of_power[typ.second_precision]
elif typ.day_precision > 0:
units = 'D'
else:
raise ValueError('Cannot infer INTERVAL type with parameters '
'second_precision=%d, day_precision=%d' %
(typ.second_precision, typ.day_precision))
return datashape.TimeDelta(unit=units)
if type(typ) in precision_types and typ.precision is not None:
return precision_to_dtype(typ.precision)
if typ in revtypes:
return dshape(revtypes[typ])[0]
if type(typ) in revtypes:
return revtypes[type(typ)]
if isinstance(typ, sa.Numeric):
return datashape.Decimal(precision=typ.precision, scale=typ.scale)
if isinstance(typ, (sa.String, sa.Unicode)):
return datashape.String(typ.length, 'U8')
else:
for k, v in revtypes.items():
if isinstance(k, type) and (isinstance(typ, k) or
hasattr(typ, 'impl') and
isinstance(typ.impl, k)):
return v
if k == typ:
return v
raise NotImplementedError("No SQL-datashape match for type %s" % typ)
@discover.register(sa.ForeignKey, sa.sql.FromClause)
def discover_foreign_key_relationship(fk, parent, parent_measure=None):
if fk.column.table is not parent:
parent_measure = discover(fk.column.table).measure
return {fk.parent.name: Map(discover(fk.parent.type), parent_measure)}
@discover.register(sa.sql.elements.ColumnClause)
def discover_sqlalchemy_column(c):
meta = Option if getattr(c, 'nullable', True) else identity
return Record([(c.name, meta(discover(c.type)))])
@discover.register(sa.sql.FromClause)
def discover_sqlalchemy_selectable(t):
ordering = {str(c): i for i, c in enumerate(c for c in t.columns.keys())}
record = list(_process_columns(t.columns))
fkeys = [discover(fkey, t, parent_measure=Record(record))
for fkey in t.foreign_keys]
for name, column in merge(*fkeys).items():
index = ordering[name]
_, key_type = record[index]
# If the foreign-key is nullable the column (map) key
# should be an Option type
if isinstance(key_type, Option):
column.key = Option(column.key)
record[index] = (name, column)
return var * Record(record)
def _process_columns(columns):
"""Process the dshapes of the columns of a table.
Parameters
----------
columns : iterable[column]
The columns to process.
Yields
------
record_entry : tuple[str, dshape]
A record entry containing the name and type of each column.
"""
for col in columns:
(name, dtype), = discover(col).fields
yield str(name), dtype
@memoize
def metadata_of_engine(engine, schema=None):
return sa.MetaData(engine, schema=schema)
def create_engine(uri, connect_args=None, **kwargs):
"""Creates a cached sqlalchemy engine.
This differs from ``sa.create_engine``'s API by only accepting
``uri`` positionally.
If the ``uri`` is an in-memory sqlite database then this will not memoize
the engine.
"""
return (
_create_engine_hashable_args
if uri == 'sqlite:///:memory:' else
_memoized_create_engine_hashable_args
)(uri, connect_args=frozenset((connect_args or {}).items()), **kwargs)
def _create_engine_hashable_args(uri, connect_args=None, **kwargs):
"""Unpacks non-hashable args for ``sa.create_engine`` and puts that back
into whatever structure is expected.
"""
return sa.create_engine(
uri,
connect_args=dict(connect_args or {}),
**kwargs
)
_memoized_create_engine_hashable_args = memoize(_create_engine_hashable_args)
@dispatch(sa.engine.base.Engine, str)
def discover(engine, tablename):
metadata = sa.MetaData(engine)
if tablename not in metadata.tables:
try:
metadata.reflect(engine,
views=metadata.bind.dialect.supports_views)
except NotImplementedError:
metadata.reflect(engine)
table = metadata.tables[tablename]
return discover(table)
@dispatch(sa.engine.base.Engine)
def discover(engine):
return discover(metadata_of_engine(engine))
@dispatch(sa.MetaData)
def discover(metadata):
try:
metadata.reflect(views=metadata.bind.dialect.supports_views)
except NotImplementedError:
metadata.reflect()
pairs = []
for table in sorted(metadata.tables.values(), key=attrgetter('name')):
name = table.name
try:
pairs.append([name, discover(table)])
except sa.exc.CompileError as e:
warnings.warn(
"Can not discover type of table {name}.\n"
"SQLAlchemy provided this error message:\n\t{msg}"
"\nSkipping.".format(
name=name,
msg=e.message,
),
stacklevel=3,
)
except NotImplementedError as e:
warnings.warn(
"Odo does not understand a SQLAlchemy type.\n"
"Odo provided the following error:\n\t{msg}"
"\nSkipping.".format(msg="\n\t".join(e.args)),
stacklevel=3,
)
return DataShape(Record(pairs))
@discover.register(sa.engine.RowProxy)
def discover_row_proxy(rp):
return Record(list(zip(rp.keys(), map(discover, rp.values()))))
def validate_foreign_keys(ds, foreign_keys):
# passed foreign_keys and column in dshape, but not a ForeignKey type
for field in foreign_keys:
if field not in ds.measure.names:
raise TypeError('Requested foreign key field %r is not a field in '
'datashape %s' % (field, ds))
for field, typ in ds.measure.fields:
if field in foreign_keys and not isinstance(getattr(typ, 'ty', typ),
Map):
raise TypeError('Foreign key %s passed in but not a Map '
'datashape, got %s' % (field, typ))
if isinstance(typ, Map) and field not in foreign_keys:
raise TypeError('Map type %s found on column %s, but %r '
"wasn't found in %s" %
(typ, field, field, foreign_keys))
def dshape_to_table(name, ds, metadata=None, foreign_keys=None,
primary_key=None):
"""
Create a SQLAlchemy table from a datashape and a name
>>> dshape_to_table('bank', '{name: string, amount: int}') # doctest: +NORMALIZE_WHITESPACE
Table('bank', MetaData(bind=None),
Column('name', Text(), table=<bank>, nullable=False),
Column('amount', Integer(), table=<bank>, nullable=False),
schema=None)
"""
if isinstance(ds, str):
ds = dshape(ds)
if not isrecord(ds.measure):
raise TypeError('dshape measure must be a record type e.g., '
'"{a: int64, b: int64}". Input measure is %r' %
ds.measure)
if metadata is None:
metadata = sa.MetaData()
if foreign_keys is None:
foreign_keys = {}
validate_foreign_keys(ds, foreign_keys)
cols = dshape_to_alchemy(ds, primary_key=primary_key or frozenset())
cols.extend(sa.ForeignKeyConstraint([column_name], [referent])
for column_name, referent in foreign_keys.items())
t = sa.Table(name, metadata, *cols, schema=metadata.schema)
return attach_schema(t, t.schema)
@dispatch(object, str)
def create_from_datashape(o, ds, **kwargs):
return create_from_datashape(o, dshape(ds), **kwargs)
@dispatch(sa.engine.base.Engine, DataShape)
def create_from_datashape(engine, ds, schema=None, foreign_keys=None,
primary_key=None, **kwargs):
assert isrecord(ds), 'datashape must be Record type, got %s' % ds
metadata = metadata_of_engine(engine, schema=schema)
for name, sub_ds in ds[0].dict.items():
t = dshape_to_table(name, sub_ds, metadata=metadata,
foreign_keys=foreign_keys,
primary_key=primary_key)
t.create()
return engine
def dshape_to_alchemy(dshape, primary_key=frozenset()):
"""
>>> dshape_to_alchemy('int')
<class 'sqlalchemy.sql.sqltypes.Integer'>
>>> dshape_to_alchemy('string')
<class 'sqlalchemy.sql.sqltypes.Text'>
>>> dshape_to_alchemy('{name: string, amount: int}')
[Column('name', Text(), table=None, nullable=False), Column('amount', Integer(), table=None, nullable=False)]
>>> dshape_to_alchemy('{name: ?string, amount: ?int}')
[Column('name', Text(), table=None), Column('amount', Integer(), table=None)]
"""
if isinstance(dshape, str):
dshape = datashape.dshape(dshape)
if isinstance(dshape, Map):
return dshape_to_alchemy(dshape.key.measure, primary_key=primary_key)
if isinstance(dshape, Option):
return dshape_to_alchemy(dshape.ty, primary_key=primary_key)
if str(dshape) in types:
return types[str(dshape)]
if isinstance(dshape, datashape.Record):
return [sa.Column(name,
dshape_to_alchemy(getattr(typ, 'ty', typ),
primary_key=primary_key),
primary_key=name in primary_key,
nullable=isinstance(typ[0], Option))
for name, typ in dshape.parameters[0]]
if isinstance(dshape, datashape.DataShape):
if isdimension(dshape[0]):
return dshape_to_alchemy(dshape[1], primary_key=primary_key)
else:
return dshape_to_alchemy(dshape[0], primary_key=primary_key)
if isinstance(dshape, datashape.String):
fixlen = dshape[0].fixlen
if fixlen is None:
return sa.TEXT
string_types = dict(U=sa.Unicode, A=sa.String)
assert dshape.encoding is not None
return string_types[dshape.encoding[0]](length=fixlen)
if isinstance(dshape, datashape.DateTime):
return sa.DATETIME(timezone=dshape.tz is not None)
if isinstance(dshape, datashape.Decimal):
return sa.NUMERIC(dshape.precision, dshape.scale)
raise NotImplementedError("No SQLAlchemy dtype match for datashape: %s"
% dshape)
@convert.register(Iterator, sa.Table, cost=300.0)
def sql_to_iterator(t, bind=None, **kwargs):
_, rows = batch(sa.select([t]), bind=bind)
return map(tuple, rows)
@convert.register(Iterator, sa.sql.Select, cost=300.0)
def select_to_iterator(sel, dshape=None, bind=None, **kwargs):
func = pluck(0) if dshape and isscalar(dshape.measure) else map(tuple)
_, rows = batch(sel, bind=bind)
return func(rows)
@convert.register(base, sa.sql.Select, cost=200.0)
def select_to_base(sel, dshape=None, bind=None, **kwargs):
with getbind(sel, bind).connect() as conn:
return conn.execute(sel).scalar()
@append.register(sa.Table, Iterator)
def append_iterator_to_table(t, rows, dshape=None, bind=None, **kwargs):
assert not isinstance(t, type)
bind = getbind(t, bind)
if not t.exists(bind=bind):
t.create(bind=bind)
rows = iter(rows)
# We see if the sequence is of tuples or dicts
# If tuples then we coerce them to dicts
try:
row = next(rows)
except StopIteration:
return t
rows = chain([row], rows)
if isinstance(row, (tuple, list)):
dshape = dshape and datashape.dshape(dshape)
if dshape and isinstance(dshape.measure, datashape.Record):
names = dshape.measure.names
if set(names) != set(discover(t).measure.names):
raise ValueError("Column names of incoming data don't match "
"column names of existing SQL table\n"
"Names in SQL table: %s\n"
"Names from incoming data: %s\n" %
(discover(t).measure.names, names))
else:
names = discover(t).measure.names
rows = (dict(zip(names, row)) for row in rows)
with bind.begin():
for chunk in partition_all(1000, rows): # TODO: 1000 is hardcoded
bind.execute(t.insert(), chunk)
return t
@append.register(sa.Table, Chunks)
def append_anything_to_sql_Table(t, c, **kwargs):
for item in c:
append(t, item, **kwargs)
return t
@append.register(sa.Table, object)
def append_anything_to_sql_Table(t, o, **kwargs):
return append(t, convert(Iterator, o, **kwargs), **kwargs)
@append.register(sa.Table, sa.Table)
def append_table_to_sql_Table(t, o, **kwargs):
s = sa.select([o])
return append(t, s, **kwargs)
@append.register(sa.Table, sa.sql.Select)
def append_select_statement_to_sql_Table(t, o, bind=None, **kwargs):
t_bind = getbind(t, bind)
o_bind = getbind(o, bind)
if t_bind != o_bind:
return append(
t,
convert(Iterator, o, bind=bind, **kwargs),
bind=bind,
**kwargs
)
bind = t_bind
assert bind.has_table(t.name, t.schema), \
'tables must come from the same database'
query = t.insert().from_select(o.columns.keys(), o)
bind.execute(query)
return t
def should_create_schema(ddl, target, bind, tables=None, state=None,
checkfirst=None, **kwargs):
return ddl.element not in inspect(target.bind).get_schema_names()
def attach_schema(obj, schema):
if schema is not None:
ddl = CreateSchema(schema, quote=True)
event.listen(
obj,
'before_create',
ddl.execute_if(
callable_=should_create_schema,
dialect='postgresql'
)
)
return obj
@resource.register(r'(.*sql.*|oracle|redshift)(\+\w+)?://.+')
def resource_sql(uri, *args, **kwargs):
engine = create_engine(
uri,
# roundtrip through a frozenset of tuples so we can cache the dict
connect_args=kwargs.pop('connect_args', {}),
**filter_kwargs(sa.create_engine, kwargs)
)
ds = kwargs.pop('dshape', None)
schema = kwargs.pop('schema', None)
foreign_keys = kwargs.pop('foreign_keys', None)
primary_key = kwargs.pop('primary_key', None)
# we were also given a table name
if args and isinstance(args[0], (str, unicode)):
table_name, args = args[0], args[1:]
metadata = metadata_of_engine(engine, schema=schema)
with ignoring(sa.exc.NoSuchTableError):
return attach_schema(
sa.Table(
table_name,
metadata,
autoload_with=engine,
),
schema,
)
if ds:
t = dshape_to_table(table_name, ds, metadata=metadata,
foreign_keys=foreign_keys,
primary_key=primary_key)
t.create()
return t
else:
raise ValueError("Table does not exist and no dshape provided")
# We were not given a table name
if ds:
create_from_datashape(engine, ds, schema=schema,
foreign_keys=foreign_keys)
return engine
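# Hedged usage sketch (added for illustration, not part of the original
# module): database URIs, table names and datashapes below are invented.
def _example_resource_sql():
    # No table name: just build (or reuse) an engine for the URI.
    engine = resource_sql('sqlite:///:memory:')
    # With a table name and a dshape the table is reflected if it exists,
    # otherwise created from the datashape.
    accounts = resource_sql('sqlite:///:memory:', 'accounts',
                            dshape='var * {name: string, amount: int64}')
    return engine, accounts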
@resource.register('impala://.+')
def resource_impala(uri, *args, **kwargs):
try:
import impala.sqlalchemy
except ImportError:
raise ImportError("Please install or update `impyla` library")
return resource_sql(uri, *args, **kwargs)
@resource.register('monetdb://.+')
def resource_monet(uri, *args, **kwargs):
try:
import monetdb
except ImportError:
raise ImportError("Please install the `sqlalchemy_monetdb` library")
return resource_sql(uri, *args, **kwargs)
@resource.register('hive://.+')
def resource_hive(uri, *args, **kwargs):
try:
import pyhive
except ImportError:
raise ImportError("Please install the `PyHive` library.")
    pattern = r'hive://((?P<user>[a-zA-Z_]\w*)@)?(?P<host>[\w.]+)(:(?P<port>\d*))?(/(?P<database>\w*))?'
d = re.search(pattern, uri.split('::')[0]).groupdict()
defaults = {'port': '10000',
'user': 'hdfs',
'database': 'default'}
for k, v in d.items():
if not v:
d[k] = defaults[k]
if d['user']:
d['user'] += '@'
uri2 = 'hive://%(user)s%(host)s:%(port)s/%(database)s' % d
if '::' in uri:
uri2 += '::' + uri.split('::')[1]
return resource_sql(uri2, *args, **kwargs)
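# Hedged illustration (added, not part of the original module): the regex and
# defaults above fill in the missing pieces of a Hive URI, e.g. (host,
# user and database names invented):
#
#     'hive://myhost'            -> 'hive://hdfs@myhost:10000/default'
#     'hive://alice@myhost/db1'  -> 'hive://alice@myhost:10000/db1'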
ooc_types.add(sa.Table)
@dispatch(sa.Table)
def drop(table, bind=None):
bind = getbind(table, bind)
table.drop(bind=bind, checkfirst=True)
if table.exists(bind=bind):
raise ValueError('table %r dropped but still exists' % table.name)
metadata_of_engine(bind, schema=table.schema).remove(table)
@convert.register(sa.sql.Select, sa.Table, cost=0)
def table_to_select(t, **kwargs):
return t.select()
@convert.register(pd.DataFrame, (sa.sql.Select, sa.sql.Selectable), cost=300.0)
def select_or_selectable_to_frame(el, bind=None, dshape=None, **kwargs):
bind = getbind(el, bind)
if bind.dialect.name == 'postgresql':
buf = StringIO()
append(CSV(None, buffer=buf), el, bind=bind, **kwargs)
buf.seek(0)
datetime_fields = []
other_dtypes = {}
optional_string_fields = []
try:
fields = dshape.measure.fields
except AttributeError:
fields = [(0, dshape.measure)]
for n, (field, dtype) in enumerate(fields):
if isdatelike(dtype):
datetime_fields.append(field)
elif isinstance(dtype, Option):
ty = dtype.ty
if ty in datashape.integral:
other_dtypes[field] = 'float64'
else:
other_dtypes[field] = ty.to_numpy_dtype()
if ty == string:
# work with integer column indices for the
# optional_string columns because we don't always
# know the column name and then the lookup will fail
# in the loop below.
optional_string_fields.append(n)
else:
other_dtypes[field] = dtype.to_numpy_dtype()
df = pd.read_csv(
buf,
parse_dates=datetime_fields,
dtype=other_dtypes,
skip_blank_lines=False,
escapechar=kwargs.get('escapechar', '\\'),
)
# read_csv really wants missing values to be NaN, but for
# string (object) columns, we want None to be missing
columns = df.columns
for field_ix in optional_string_fields:
# use ``df.loc[bool, df.columns[field_ix]]`` because you cannot do
# boolean slicing with ``df.iloc``.
field = columns[field_ix]
df.loc[df[field].isnull(), field] = None
return df
columns, rows = batch(el, bind=bind)
dtypes = {}
try:
fields = dshape.measure.fields
except AttributeError:
fields = [(columns[0], dshape.measure)]
for field, dtype in fields:
if isinstance(dtype, Option):
ty = dtype.ty
if ty in datashape.integral:
dtypes[field] = 'float64'
else:
try:
dtypes[field] = ty.to_numpy_dtype()
except TypeError:
dtypes[field] = np.dtype(object)
else:
try:
dtypes[field] = dtype.to_numpy_dtype()
except TypeError:
dtypes[field] = np.dtype(object)
return pd.DataFrame(np.array(list(map(tuple, rows)),
dtype=[(str(c), dtypes[c]) for c in columns]))
class CopyToCSV(sa.sql.expression.Executable, sa.sql.ClauseElement):
def __init__(
self,
element,
path,
delimiter=',',
quotechar='"',
lineterminator='\n',
escapechar='\\',
header=True,
na_value='',
encoding=None,
bind=None,
):
self.element = element
self.path = path
self.delimiter = delimiter
self.quotechar = quotechar
self.lineterminator = lineterminator
self._bind = bind = getbind(element, bind)
# mysql cannot write headers
self.header = header and bind.dialect.name != 'mysql'
self.escapechar = escapechar
self.na_value = na_value
self.encoding = encoding
@property
def bind(self):
return self._bind
try:
from sqlalchemy.dialects.postgresql.psycopg2 import PGCompiler_psycopg2
except ImportError:
pass
else:
@partial(setattr, PGCompiler_psycopg2, 'visit_mod_binary')
def _postgres_visit_mod_binary(self, binary, operator, **kw):
"""Patched visit mod binary to work with literal_binds.
When https://github.com/zzzeek/sqlalchemy/pull/366 is merged we can
remove this patch.
"""
literal_binds = kw.get('literal_binds', False)
if (getattr(self.preparer, '_double_percents', True) and
not literal_binds):
return '{} %% {}'.format(
self.process(binary.left, **kw),
self.process(binary.right, **kw),
)
else:
return '{} % {}'.format(
self.process(binary.left, **kw),
self.process(binary.right, **kw),
)
@compiles(CopyToCSV, 'postgresql')
def compile_copy_to_csv_postgres(element, compiler, **kwargs):
selectable = element.element
if isinstance(selectable, sa.Table):
selectable_part = compiler.preparer.format_table(selectable)
else:
selectable_part = '(%s)' % compiler.process(element.element, **kwargs)
return 'COPY %s TO STDOUT WITH (%s)' % (
selectable_part,
compiler.process(
sa.text(
"""
FORMAT CSV,
HEADER :header,
DELIMITER :delimiter,
QUOTE :quotechar,
NULL :na_value,
ESCAPE :escapechar,
ENCODING :encoding
""",
).bindparams(
header=element.header,
delimiter=element.delimiter,
quotechar=element.quotechar,
na_value=element.na_value,
escapechar=element.escapechar,
encoding=element.encoding or element.bind.execute(
'show client_encoding',
).scalar(),
),
**kwargs
),
)
@compiles(CopyToCSV, 'mysql')
def compile_copy_to_csv_mysql(element, compiler, **kwargs):
selectable = element.element
return compiler.process(
sa.text(
"""{0} INTO OUTFILE :path
CHARACTER SET :encoding
FIELDS TERMINATED BY :delimiter
OPTIONALLY ENCLOSED BY :quotechar
ESCAPED BY :escapechar
LINES TERMINATED BY :lineterminator
""".format(
compiler.process(
selectable.select()
if isinstance(selectable, sa.Table) else selectable,
**kwargs
)
)
).bindparams(
path=element.path,
encoding=element.encoding or element.bind.execute(
'select @@character_set_client'
).scalar(),
delimiter=element.delimiter,
quotechar=element.quotechar,
escapechar=element.escapechar,
lineterminator=element.lineterminator
)
)
@compiles(CopyToCSV, 'sqlite')
def compile_copy_to_csv_sqlite(element, compiler, **kwargs):
if element.encoding is not None:
raise ValueError(
"'encoding' keyword argument not supported for "
"SQLite to CSV conversion"
)
if not find_executable('sqlite3'):
raise MDNotImplementedError("Could not find sqlite executable")
    # we are sending a SQL string directly to the SQLite process so we always
    # need to bind everything before sending it
kwargs['literal_binds'] = True
selectable = element.element
sql = compiler.process(
selectable.select() if isinstance(selectable, sa.Table) else selectable,
**kwargs
) + ';'
sql = re.sub(r'\s{2,}', ' ', re.sub(r'\s*\n\s*', ' ', sql)).encode(
sys.getfilesystemencoding() # we send bytes to the process
)
cmd = ['sqlite3', '-csv',
'-%sheader' % ('no' if not element.header else ''),
'-separator', element.delimiter,
selectable.bind.url.database]
with open(element.path, mode='at') as f:
subprocess.Popen(cmd, stdout=f, stdin=subprocess.PIPE).communicate(sql)
# This will be a no-op since we're doing the write during the compile
return ''
try:
from sqlalchemy_redshift.dialect import UnloadFromSelect
from odo.backends.aws import S3, get_s3_connection
except ImportError:
pass
else:
@resource.register('s3://.*/$')
def resource_s3_prefix(uri, **kwargs):
return Directory(S3)(uri, **kwargs)
@append.register(Directory(S3), sa.Table)
    def redshift_to_s3_bucket(bucket, selectable, dshape=None, bind=None,
                              **kwargs):
s3_conn_kwargs = filter_kwargs(get_s3_connection, kwargs)
s3 = get_s3_connection(**s3_conn_kwargs)
unload_kwargs = filter_kwargs(UnloadFromSelect, kwargs)
unload_kwargs['unload_location'] = bucket.path
unload_kwargs['access_key_id'] = s3.access_key
unload_kwargs['secret_access_key'] = s3.secret_key
unload = UnloadFromSelect(selectable.select(), **unload_kwargs)
with getbind(selectable, bind).begin() as conn:
conn.execute(unload)
return bucket.path
@append.register(CSV, sa.sql.Selectable)
def append_table_to_csv(csv, selectable, dshape=None, bind=None, **kwargs):
kwargs = keyfilter(keywords(CopyToCSV).__contains__,
merge(csv.dialect, kwargs))
stmt = CopyToCSV(
selectable,
os.path.abspath(csv.path) if csv.path is not None else None,
bind=bind,
**kwargs
)
bind = getbind(selectable, bind)
if bind.dialect.name == 'postgresql':
with csv.open('ab+') as f:
with bind.begin() as conn:
conn.connection.cursor().copy_expert(literal_compile(stmt), f)
else:
with bind.begin() as conn:
conn.execute(stmt)
return csv
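# Hedged usage sketch (added for illustration, not part of the original
# module): writing a select statement out to a CSV file dispatches to the
# dialect-specific CopyToCSV compilers above (COPY ... TO STDOUT on
# PostgreSQL, INTO OUTFILE on MySQL, the sqlite3 CLI for SQLite).  The table
# and path below are invented.
def _example_select_to_csv(accounts, engine):
    csv = CSV('/tmp/accounts.csv')
    return append_table_to_csv(csv, sa.select([accounts]), bind=engine)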
try:
from .hdfs import HDFS
except ImportError:
pass
else:
@append.register(HDFS(CSV), sa.sql.Selectable)
def append_selectable_to_hdfs_csv(*args, **kwargs):
raise MDNotImplementedError()
| {
"repo_name": "quantopian/odo",
"path": "odo/backends/sql.py",
"copies": "1",
"size": "34206",
"license": "bsd-3-clause",
"hash": -5080356345547088000,
"line_mean": 32.1774975752,
"line_max": 113,
"alpha_frac": 0.6049231129,
"autogenerated": false,
"ratio": 3.8703326544467074,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9971939143336901,
"avg_score": 0.0006633248019614179,
"num_lines": 1031
} |
from __future__ import absolute_import, division, print_function
import numbers
import toolz
import inspect
from toolz import unique, concat, compose, partial
from pprint import pprint
from ..compatibility import StringIO, _strtypes, builtins
from ..dispatch import dispatch
__all__ = ['Node', 'path', 'common_subexpression', 'eval_str']
base = (numbers.Number,) + _strtypes
class Node(object):
""" Node in a tree
This serves as the base class for ``Expr``. This class holds all of the
tree traversal functions that are independent of tabular or array
computation. This is everything that we can do independent of the problem
domain. Note that datashape is not imported.
See Also
--------
blaze.expr.expressions.Expr
"""
__inputs__ = '_child',
def __init__(self, *args, **kwargs):
assert frozenset(kwargs).issubset(self.__slots__)
for slot, arg in zip(self.__slots__[1:], args):
setattr(self, slot, arg)
for key, value in kwargs.items():
setattr(self, key, value)
@property
def _args(self):
return tuple([getattr(self, slot) for slot in self.__slots__[1:]])
@property
def _inputs(self):
return tuple([getattr(self, i) for i in self.__inputs__])
def _leaves(self):
""" Leaves of an expression tree
All nodes without inputs. Leaves are returned in order, left to right.
>>> from blaze.expr import symbol, join, by
>>> t = symbol('t', 'var * {id: int32, name: string}')
>>> t._leaves()
[t]
>>> by(t.name, count=t.id.nunique())._leaves()
[t]
>>> v = symbol('v', 'var * {id: int32, city: string}')
>>> join(t, v)._leaves()
[t, v]
"""
if not self._inputs:
return [self]
else:
return list(unique(concat(i._leaves() for i in self._inputs if
isinstance(i, Node))))
def isidentical(self, other):
return isidentical(self, other)
def __hash__(self):
try:
return self._hash
except AttributeError:
self._hash = hash((type(self), self._args))
return self._hash
def __str__(self):
rep = ["%s=%s" % (slot, _str(arg))
for slot, arg in zip(self.__slots__[1:], self._args)]
return "%s(%s)" % (type(self).__name__, ', '.join(rep))
def __repr__(self):
return str(self)
def _traverse(self):
""" Traverse over tree, yielding all subtrees and leaves """
yield self
traversals = (arg._traverse() if isinstance(arg, Node) else [arg]
for arg in self._args)
for trav in traversals:
for item in trav:
yield item
def _subs(self, d):
""" Substitute terms in the tree
>>> from blaze.expr import symbol
>>> t = symbol('t', 'var * {name: string, amount: int, id: int}')
>>> expr = t.amount + 3
>>> expr._subs({3: 4, 'amount': 'id'}).isidentical(t.id + 4)
True
"""
return subs(self, d)
def _resources(self):
return toolz.merge([arg._resources() for arg in self._args
if isinstance(arg, Node)])
def _subterms(self):
return subterms(self)
def __contains__(self, other):
return other in set(self._subterms())
def __getstate__(self):
return self._args
def __setstate__(self, state):
self.__init__(*state)
def __eq__(self, other):
ident = self.isidentical(other)
if ident is True:
return ident
try:
return self._eq(other)
except AttributeError:
# e.g., we can't compare whole tables to other things (yet?)
pass
return False
def __ne__(self, other):
return self._ne(other)
def __lt__(self, other):
return self._lt(other)
def __le__(self, other):
return self._le(other)
def __gt__(self, other):
return self._gt(other)
def __ge__(self, other):
return self._ge(other)
def __add__(self, other):
return self._add(other)
def __radd__(self, other):
return self._radd(other)
def __mul__(self, other):
return self._mul(other)
def __rmul__(self, other):
return self._rmul(other)
def __div__(self, other):
return self._div(other)
def __rdiv__(self, other):
return self._rdiv(other)
__truediv__ = __div__
__rtruediv__ = __rdiv__
def __floordiv__(self, other):
return self._floordiv(other)
def __rfloordiv__(self, other):
return self._rfloordiv(other)
def __sub__(self, other):
return self._sub(other)
def __rsub__(self, other):
return self._rsub(other)
def __pow__(self, other):
return self._pow(other)
def __rpow__(self, other):
return self._rpow(other)
def __mod__(self, other):
return self._mod(other)
def __rmod__(self, other):
return self._rmod(other)
def __or__(self, other):
return self._or(other)
def __ror__(self, other):
return self._ror(other)
def __and__(self, other):
return self._and(other)
def __rand__(self, other):
return self._rand(other)
def __neg__(self):
return self._neg()
def __invert__(self):
return self._invert()
def __abs__(self):
from .math import abs
return abs(self)
def isidentical(a, b):
""" Strict equality testing
Different from x == y -> Eq(x, y)
>>> isidentical(1, 1)
True
>>> from blaze.expr import symbol
>>> x = symbol('x', 'int')
>>> isidentical(x, 1)
False
>>> isidentical(x + 1, x + 1)
True
>>> isidentical(x + 1, x + 2)
False
>>> isidentical((x, x + 1), (x, x + 1))
True
>>> isidentical((x, x + 1), (x, x + 2))
False
"""
if isinstance(a, base) and isinstance(b, base):
return a == b
if type(a) != type(b):
return False
if isinstance(a, Node):
return all(map(isidentical, a._args, b._args))
if isinstance(a, (list, tuple)) and isinstance(b, (list, tuple)):
return len(a) == len(b) and all(map(isidentical, a, b))
return a == b
def get_callable_name(o):
"""Welcome to str inception. Leave your kittens at home.
"""
# special case partial objects
if isinstance(o, partial):
return 'partial(%s, %s)' % (get_callable_name(o.func),
', '.join(map(str, o.args)))
try:
# python 3 makes builtins look nice
return o.__qualname__
except AttributeError:
try:
# show the module of the object, if we can
return '%s.%s' % (inspect.getmodule(o).__name__, o.__name__)
except AttributeError:
try:
# __self__ tells us the class the method is bound to
return '%s.%s' % (o.__self__.__name__, o.__name__)
except AttributeError:
# exhausted all avenues of printing callables so just print the
# name of the object
return o.__name__
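# Hedged examples (added for illustration, not part of the original module):
# representative outputs of ``get_callable_name``; the exact strings depend
# on the Python version, so treat them as a sketch only.
def _callable_name_examples():
    import math
    return [
        get_callable_name(len),              # e.g. 'len'
        get_callable_name(math.sqrt),        # e.g. 'sqrt' or 'math.sqrt'
        get_callable_name(partial(max, 0)),  # e.g. 'partial(max, 0)'
    ]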
def _str(s):
""" Wrap single quotes around strings """
if isinstance(s, str):
return "'%s'" % s
elif callable(s):
return get_callable_name(s)
elif isinstance(s, Node):
return str(s)
else:
stream = StringIO()
pprint(s, stream=stream)
return stream.getvalue().rstrip()
@dispatch(Node)
def subterms(expr):
return concat([[expr], concat(map(subterms, expr._inputs))])
@dispatch(object)
def subterms(x):
yield x
def subs(o, d):
""" Substitute values within data structure
>>> subs(1, {1: 2})
2
>>> subs([1, 2, 3], {2: 'Hello'})
[1, 'Hello', 3]
"""
d = dict((k, v) for k, v in d.items() if k is not v)
if not d:
return o
try:
if o in d:
d = d.copy()
o = d.pop(o)
except TypeError:
pass
return _subs(o, d)
@dispatch((tuple, list), dict)
def _subs(o, d):
return type(o)([subs(arg, d) for arg in o])
@dispatch(Node, dict)
def _subs(o, d):
"""
>>> from blaze.expr import symbol
>>> t = symbol('t', 'var * {name: string, balance: int}')
>>> subs(t, {'balance': 'amount'}).fields
['name', 'amount']
"""
newargs = [subs(arg, d) for arg in o._args]
return type(o)(*newargs)
@dispatch(object, dict)
def _subs(o, d):
""" Private dispatched version of ``subs``
>>> subs('Hello', {})
'Hello'
"""
return o
def path(a, b):
""" A path of nodes from a to b
>>> from blaze.expr import symbol
>>> t = symbol('t', 'var * {name: string, amount: int, id: int}')
>>> expr = t.amount.sum()
>>> list(path(expr, t))
[sum(t.amount), t.amount, t]
"""
while not a.isidentical(b):
yield a
if not a._inputs:
break
for child in a._inputs:
if any(b.isidentical(node) for node in child._traverse()):
a = child
break
yield a
def common_subexpression(*exprs):
""" Common sub expression between subexpressions
Examples
--------
>>> from blaze.expr import symbol, common_subexpression
>>> t = symbol('t', 'var * {x: int, y: int}')
>>> common_subexpression(t.x, t.y)
t
"""
sets = [set(subterms(t)) for t in exprs]
return builtins.max(set.intersection(*sets),
key=compose(len, str))
def eval_str(expr):
""" String suitable for evaluation
>>> from blaze.expr import symbol, eval_str
>>> x = symbol('x', 'real')
>>> eval_str(2*x + 1)
'(2 * x) + 1'
>>> from datetime import date
>>> eval_str(date(2000, 1, 20))
'datetime.date(2000, 1, 20)'
"""
from datetime import date, datetime
if isinstance(expr, (date, datetime)):
return repr(expr)
return repr(expr) if isinstance(expr, _strtypes) else str(expr)
def parenthesize(s):
"""
>>> parenthesize('1')
'1'
>>> parenthesize('1 + 2')
'(1 + 2)'
"""
if ' ' in s:
return '(%s)' % s
else:
return s
| {
"repo_name": "dwillmer/blaze",
"path": "blaze/expr/core.py",
"copies": "2",
"size": "10347",
"license": "bsd-3-clause",
"hash": -1437191260974576000,
"line_mean": 23.4033018868,
"line_max": 79,
"alpha_frac": 0.5321349183,
"autogenerated": false,
"ratio": 3.6743607954545454,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00015161725067385447,
"num_lines": 424
} |
from __future__ import absolute_import, division, print_function
import numpy as np
from collections import OrderedDict
import os
import re
from atom.api import Atom, Str, observe, List, Int, Bool, Typed
from skbeam.fluorescence import XrfElement as Element
from skbeam.core.fitting.xrf_model import K_LINE, L_LINE, M_LINE
from .fileio import save_fitdata_to_hdf
from .fit_spectrum import get_energy_bin_range
from ..core.map_processing import compute_selected_rois, TerminalProgressBar
import logging
logger = logging.getLogger(__name__)
class ROISettings(Atom):
"""
This class defines basic data structure for roi calculation.
Attributes
----------
prefix : str
prefix name
    line_val : int
        emission energy of the primary line (eV)
    left_val : int
        left boundary of the ROI (eV)
    right_val : int
        right boundary of the ROI (eV)
    default_left : int
        default left boundary (eV)
    default_right : int
        default right boundary (eV)
    step : int
        minimum step for changing a boundary (eV)
show_plot : bool
option to plot
"""
prefix = Str()
line_val = Int()
left_val = Int()
right_val = Int()
default_left = Int()
default_right = Int()
step = Int(1)
show_plot = Bool(False)
@observe("left_val")
def _value_update(self, change):
if change["type"] == "create":
return
logger.debug("left value is changed {}".format(change))
@observe("show_plot")
def _plot_opt(self, change):
if change["type"] == "create":
return
logger.debug("show plot is changed {}".format(change))
class ROIModel(Atom):
"""
Control roi calculation according to given inputs.
Parameters
----------
parameters : Dict
parameter values used for fitting
data_dict : Dict
dict of 3D data
element_for_roi : str
inputs given by users
element_list_roi : list
list of elements after parsing
roi_dict : dict
dict of ROISettings object
enable_roi_computation : Bool
        enables/disables the GUI element that starts ROI computation.
        At least one element must be selected and all entries in the element
        list must be valid before ROIs may be computed
result_folder : Str
directory which contains HDF5 file, in which results of processing are saved
hdf_path : Str
full path to the HDF5 file, in which results are saved
hdf_name : Str
name of the HDF file, in which results are saved
data_title : str
The title of the selected dataset (from ``fileio`` module)
data_title_base : str
The title changed for internal use (suffix is removed)
data_title_adjusted : str
The title changed for internal use (suffix 'sum' is removed if it exists)
suffix_name_roi : str
The suffix may have values 'sum', 'det1', 'det2' etc.
"""
# Reference to ParamModel object
param_model = Typed(object)
# Reference to FileIOModel object
io_model = Typed(object)
element_for_roi = Str()
element_list_roi = List()
roi_dict = OrderedDict()
enable_roi_computation = Bool(False)
subtract_background = Bool(False)
result_folder = Str()
hdf_path = Str()
hdf_name = Str()
data_title = Str()
data_title_base = Str()
data_title_adjusted = Str()
suffix_name_roi = Str()
def filename_update(self, change):
"""
Observer function to be connected to the fileio model
in the top-level gui.py startup
Parameters
----------
        change : dict
This is the dictionary that gets passed to a function
with the @observe decorator
"""
self.hdf_name = change["value"]
# output to .h5 file
self.hdf_path = os.path.join(self.result_folder, self.hdf_name)
def result_folder_changed(self, change):
"""
Observer function to be connected to the fileio model
in the top-level gui.py startup
Parameters
----------
        change : dict
This is the dictionary that gets passed to a function
with the @observe decorator
"""
self.result_folder = change["value"]
def data_title_update(self, change):
"""
Observer function to be connected to the fileio model
in the top-level gui.py startup
Parameters
----------
        change : dict
This is the dictionary that gets passed to a function
with the @observe decorator
"""
self.data_title = change["value"]
# It is assumed, that ``self.data_title`` was created in the ``fileio`` module
# and has dataset label attached to the end of it.
# The labels are ``sum``, ``det1``, ``det2`` etc. depending on the number
# of detector channels.
self.suffix_name_roi = self.data_title.split("_")[-1]
self.data_title_base = "_".join(self.data_title.split("_")[:-1])
if self.suffix_name_roi == "sum":
# If suffix is 'sum', then remove the suffix
self.data_title_adjusted = self.data_title_base
else:
# Else keep the original title
self.data_title_adjusted = self.data_title
def __init__(self, *, param_model, io_model):
# Initialize with an empty string (no elements selected)
self.param_model = param_model
self.io_model = io_model
self.element_for_roi = ""
self.enable_roi_computation = False
@observe("element_for_roi")
def _update_element(self, change):
"""
Get element information as a string and parse it as a list.
This element information means the ones for roi setup.
"""
self.element_for_roi = self.element_for_roi.strip(" ")
# Remove leading and trailing ','
self.element_for_roi = self.element_for_roi.strip(",")
# Remove leading and trailing '.'
self.element_for_roi = self.element_for_roi.strip(".")
try:
if len(self.element_for_roi) == 0:
logger.debug("No elements entered.")
self.remove_all_roi()
self.element_list_roi = []
self.enable_roi_computation = False
return
elif "," in self.element_for_roi:
element_list = [v.strip(" ") for v in self.element_for_roi.split(",")]
else:
element_list = [v for v in self.element_for_roi.split(" ")]
# with self.suppress_notifications():
# self.element_list_roi = element_list
logger.debug("Current elements for ROI sum are: {}".format(element_list))
self.update_roi(element_list)
self.element_list_roi = element_list
self.enable_roi_computation = True
except Exception as ex:
logger.warning(f"Incorrect specification of element lines for ROI computation: {ex}")
self.enable_roi_computation = False
def select_elements_from_list(self, element_list):
self.element_for_roi = ", ".join(element_list)
def use_all_elements(self):
self.element_for_roi = ", ".join(K_LINE + L_LINE) # +M_LINE)
def clear_selected_elements(self):
self.element_for_roi = ""
def remove_all_roi(self):
self.roi_dict.clear()
def update_roi(self, element_list, std_ratio=4):
"""
Update elements without touching old ones.
Parameters
----------
element_list : list
list of elements for roi
std_ratio : float, optional
Define the range of roi for given element.
Notes
-----
        The unit of energy is eV in this function, because the SpinBox
        widget in Enaml can only accept integer input. To be updated.
"""
eline_list = K_LINE + L_LINE + M_LINE
for v in element_list:
if v in self.roi_dict:
continue
if v not in eline_list:
raise ValueError(f"Emission line {v} is unknown")
if "_K" in v:
temp = v.split("_")[0]
e = Element(temp)
val = int(e.emission_line["ka1"] * 1000)
elif "_L" in v:
temp = v.split("_")[0]
e = Element(temp)
val = int(e.emission_line["la1"] * 1000)
elif "_M" in v:
temp = v.split("_")[0]
e = Element(temp)
val = int(e.emission_line["ma1"] * 1000)
delta_v = int(self.get_sigma(val / 1000) * 1000)
roi = ROISettings(
prefix=self.suffix_name_roi,
line_val=val,
left_val=val - delta_v * std_ratio,
right_val=val + delta_v * std_ratio,
default_left=val - delta_v * std_ratio,
default_right=val + delta_v * std_ratio,
step=1,
show_plot=False,
)
self.roi_dict.update({v: roi})
# remove old items not included in element_list
for k in self.roi_dict.copy().keys():
if k not in element_list:
del self.roi_dict[k]
def get_sigma(self, energy, epsilon=2.96):
"""
Calculate the std at given energy.
"""
temp_val = 2 * np.sqrt(2 * np.log(2))
return np.sqrt(
(self.param_model.param_new["fwhm_offset"]["value"] / temp_val) ** 2
+ energy * epsilon * self.param_model.param_new["fwhm_fanoprime"]["value"]
)
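    # Note (added for clarity): the expression above is the standard XRF peak
    # width model
    #
    #     sigma(E) = sqrt((fwhm_offset / (2*sqrt(2*ln 2)))**2
    #                     + E * epsilon * fwhm_fanoprime)
    #
    # with E passed in keV (see ``update_roi``); ``update_roi`` then uses
    # ``std_ratio`` times this sigma as the ROI half-width on each side of
    # the emission line.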
def get_roi_sum(self):
"""
Save roi sum into a dict.
Returns
-------
dict
nested dict as output
"""
roi_result = {}
datav = self.io_model.data_sets[self.data_title].raw_data
logger.info(f"Computing ROIs for dataset {self.data_title} ...")
snip_param = {
"e_offset": self.param_model.param_new["e_offset"]["value"],
"e_linear": self.param_model.param_new["e_linear"]["value"],
"e_quadratic": self.param_model.param_new["e_quadratic"]["value"],
"b_width": self.param_model.param_new["non_fitting_values"]["background_width"],
}
n_bin_low, n_bin_high = get_energy_bin_range(
num_energy_bins=datav.shape[2],
low_e=self.param_model.param_new["non_fitting_values"]["energy_bound_low"]["value"],
high_e=self.param_model.param_new["non_fitting_values"]["energy_bound_high"]["value"],
e_offset=self.param_model.param_new["e_offset"]["value"],
e_linear=self.param_model.param_new["e_linear"]["value"],
)
# Prepare the 'roi_dict' parameter for computations
roi_dict = {
_: (self.roi_dict[_].left_val / 1000.0, self.roi_dict[_].right_val / 1000.0)
for _ in self.roi_dict.keys()
}
roi_dict_computed = compute_selected_rois(
data=datav,
data_sel_indices=(n_bin_low, n_bin_high),
roi_dict=roi_dict,
snip_param=snip_param,
use_snip=self.subtract_background,
chunk_pixels=5000,
n_chunks_min=4,
progress_bar=TerminalProgressBar("Computing ROIs: "),
client=None,
)
# Save ROI data to HDF5 file
self.saveROImap_to_hdf(roi_dict_computed)
# Add scalers to the ROI dataset, so that they can be selected from Image Wizard.
# We don't want to save scalers to the file, since they are already in the file.
# So we add scalers after data is saved.
scaler_key = f"{self.data_title_base}_scaler"
if scaler_key in self.io_model.img_dict:
roi_dict_computed.update(self.io_model.img_dict[scaler_key])
roi_result[f"{self.data_title_adjusted}_roi"] = roi_dict_computed
logger.info("ROI is computed.")
return roi_result
def saveROImap_to_hdf(self, data_dict_roi):
# Generate the path to computed ROIs in the HDF5 file
det_name = "detsum" # Assume that ROIs are computed using the sum of channels
# Search for channel name in the data title. Channels are named
# det1, det2, ... , i.e. 'det' followed by integer number.
# The channel name is always located at the end of the ``data_title``.
# If the channel name is found, then build the path using this name.
srch = re.search("det\d+$", self.data_title) # noqa: W605
if srch:
det_name = srch.group(0)
inner_path = f"xrfmap/{det_name}"
try:
save_fitdata_to_hdf(
self.hdf_path,
data_dict_roi,
datapath=inner_path,
data_saveas="xrf_roi",
dataname_saveas="xrf_roi_name",
)
except Exception as ex:
logger.error(f"Failed to save ROI data to file '{self.hdf_path}'\n Exception: {ex}")
else:
logger.info(f"ROI data was successfully saved to file '{self.hdf_name}'")
| {
"repo_name": "NSLS-II/PyXRF",
"path": "pyxrf/model/roi_model.py",
"copies": "1",
"size": "13148",
"license": "bsd-3-clause",
"hash": -9179613348779383000,
"line_mean": 32.6265984655,
"line_max": 99,
"alpha_frac": 0.5715698205,
"autogenerated": false,
"ratio": 3.9119309729247247,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4983500793424725,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import numpy as np
from datashape import dshape, DataShape, Option, DateTime, string, TimeDelta
from datashape import Date, to_numpy_dtype, Tuple, String, Decimal
from datashape.predicates import isscalar, isnumeric, isrecord
def unit_to_dtype(ds):
""" Convert a datashape Unit instance into a numpy dtype
Parameters
----------
ds : DataShape
The DataShape instance to convert
Returns
-------
np.dtype
Examples
--------
>>> unit_to_dtype('int32')
dtype('int32')
>>> unit_to_dtype('float64')
dtype('float64')
>>> unit_to_dtype('?int64')
dtype('float64')
>>> unit_to_dtype('string')
dtype('O')
>>> unit_to_dtype('?datetime')
dtype('<M8[us]')
"""
if isinstance(ds, str):
ds = dshape(ds)
if isinstance(ds, DataShape):
ds = ds.measure
if isinstance(ds, Option) and isscalar(ds) and isnumeric(ds):
if isinstance(ds.ty, Decimal):
return unit_to_dtype(
str(ds.ty.to_numpy_dtype()).replace('int', 'float')
)
return unit_to_dtype(str(ds).replace('int', 'float').replace('?', ''))
if isinstance(ds, Option) and isinstance(
ds.ty, (Date, DateTime, String, TimeDelta)
):
ds = ds.ty
if ds == string:
return np.dtype('O')
return to_numpy_dtype(ds)
def dshape_to_numpy(ds):
""" Convert a datashape to a NumPy dtype
Parameters
----------
ds : DataShape
The DataShape instance to convert
Returns
-------
np.dtype
Examples
--------
>>> dshape_to_numpy('int32')
dtype('int32')
>>> dshape_to_numpy('?int32')
dtype('float32')
>>> dshape_to_numpy('{name: string[5, "ascii"], amount: ?int32}')
dtype([('name', 'S5'), ('amount', '<f4')])
>>> dshape_to_numpy('(int32, float32)')
dtype([('f0', '<i4'), ('f1', '<f4')])
"""
if isinstance(ds, str):
ds = dshape(ds)
if isinstance(ds, DataShape):
ds = ds.measure
if isrecord(ds):
return np.dtype([
(str(name), unit_to_dtype(typ))
for name, typ in zip(ds.names, ds.types)
])
if isinstance(ds, Tuple):
return np.dtype([
('f%d' % i, unit_to_dtype(typ))
for i, typ in enumerate(ds.parameters[0])
])
else:
return unit_to_dtype(ds)
def dshape_to_pandas(ds):
""" Convert a datashape to a pair of
``({name1: dtype1, name2: dtype2, ...}, [datecol1, datecol2, ...])``
Parameters
----------
ds : DataShape
The DataShape instance to convert
Returns
-------
({str: np.dtype}, [str])
Examples
--------
>>> dshape_to_pandas('{a: int32}') # doctest: +SKIP
({'a': dtype('int32')}, [])
>>> dshape_to_pandas('{a: int32, when: datetime}') # doctest: +SKIP
({'a': dtype('int32')}, ['when'])
>>> dshape_to_pandas('{a: ?int64}') # doctest: +SKIP
({'a': dtype('float64')}, [])
"""
if isinstance(ds, str):
ds = dshape(ds)
if isinstance(ds, DataShape) and len(ds) == 1:
ds = ds[0]
dtypes = {
name: (
np.dtype('object')
if isinstance(typ, String) else unit_to_dtype(typ)
)
for name, typ in ds.measure.dict.items() if 'date' not in str(typ)
}
datetimes = [
name for name, typ in ds.measure.dict.items() if 'date' in str(typ)
]
return dtypes, datetimes
| {
"repo_name": "cpcloud/odo",
"path": "odo/numpy_dtype.py",
"copies": "1",
"size": "3524",
"license": "bsd-3-clause",
"hash": -3612319085764110300,
"line_mean": 24.9117647059,
"line_max": 78,
"alpha_frac": 0.5428490352,
"autogenerated": false,
"ratio": 3.4925668979187314,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45354159331187316,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import numpy as np
from datashape import *
from datashape.predicates import isscalar, isnumeric
def unit_to_dtype(ds):
"""
>>> unit_to_dtype('int32')
dtype('int32')
>>> unit_to_dtype('float64')
dtype('float64')
>>> unit_to_dtype('?int64')
dtype('float64')
>>> unit_to_dtype('string')
dtype('O')
>>> unit_to_dtype('?datetime')
dtype('<M8[us]')
"""
if isinstance(ds, str):
ds = dshape(ds)
if isinstance(ds, DataShape):
ds = ds.measure
if isinstance(ds, Option) and isscalar(ds) and isnumeric(ds):
return unit_to_dtype(str(ds).replace('int', 'float').replace('?', ''))
if isinstance(ds, Option) and ds.ty in (date_, datetime_, string):
ds = ds.ty
if ds == string:
return np.dtype('O')
return to_numpy_dtype(ds)
def dshape_to_numpy(ds):
"""
>>> dshape_to_numpy('int32')
dtype('int32')
>>> dshape_to_numpy('?int32')
dtype('float32')
>>> dshape_to_numpy('{name: string[5, "ascii"], amount: ?int32}')
dtype([('name', 'S5'), ('amount', '<f4')])
>>> dshape_to_numpy('(int32, float32)')
dtype([('f0', '<i4'), ('f1', '<f4')])
"""
if isinstance(ds, str):
ds = dshape(ds)
if isinstance(ds, DataShape):
ds = ds.measure
if isrecord(ds):
return np.dtype([(str(name), unit_to_dtype(typ))
for name, typ in zip(ds.names, ds.types)])
if isinstance(ds, Tuple):
return np.dtype([('f%d' % i, unit_to_dtype(typ))
for i, typ in enumerate(ds.parameters[0])])
else:
return unit_to_dtype(ds)
def dshape_to_pandas(ds):
"""
>>> dshape_to_pandas('{a: int32}')
({'a': dtype('int32')}, [])
>>> dshape_to_pandas('{a: int32, when: datetime}')
({'a': dtype('int32')}, ['when'])
>>> dshape_to_pandas('{a: ?int64}')
({'a': dtype('float64')}, [])
"""
if isinstance(ds, str):
ds = dshape(ds)
if isinstance(ds, DataShape) and len(ds) == 1:
ds = ds[0]
dtypes = dict((name, unit_to_dtype(typ))
for name, typ in ds.measure.dict.items()
                  if 'date' not in str(typ))
datetimes = [name for name, typ in ds.measure.dict.items()
if 'date' in str(typ)]
return dtypes, datetimes
| {
"repo_name": "mrocklin/into",
"path": "into/numpy_dtype.py",
"copies": "1",
"size": "2367",
"license": "bsd-3-clause",
"hash": 180450960615548160,
"line_mean": 26.523255814,
"line_max": 78,
"alpha_frac": 0.5449936629,
"autogenerated": false,
"ratio": 3.246913580246914,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9284640076979226,
"avg_score": 0.0014534332335376379,
"num_lines": 86
} |
from __future__ import absolute_import, division, print_function
import numpy as np
from dynd import nd
import datashape
from . import IDataDescriptor, Capabilities
from ..optional_packages import tables_is_here
if tables_is_here:
import tables as tb
from .dynd_data_descriptor import DyNDDataDescriptor
# WARNING! PyTables always return NumPy arrays when doing indexing
# operations. This is why DyNDDataDescriptor is used for returning
# the values here.
def hdf5_descriptor_iter(h5arr):
for i in range(len(h5arr)):
# PyTables doesn't have a convenient way to avoid collapsing
# to a scalar, this is a way to avoid that
el = np.array(h5arr[i], dtype=h5arr.dtype)
yield DyNDDataDescriptor(nd.array(el))
h5arr._v_file.close()
class HDF5DataDescriptor(IDataDescriptor):
"""
A Blaze data descriptor which exposes a HDF5 dataset.
"""
def __init__(self, filename, datapath):
self.filename = filename
self.datapath = datapath
with tb.open_file(self.filename, mode='r') as f:
obj = f.get_node(f.root, self.datapath)
# We are going to support both homogeneous and heterogeneous
# datasets, but not VL types (VLArray) for the time being.
if not isinstance(obj, (tb.Array, tb.Table)):
raise TypeError(('object is not a supported HDF5 dataset, '
'it has type %r') % type(obj))
@property
def dshape(self):
# This cannot be cached because the Array can change the dshape
with tb.open_file(self.filename, mode='r') as f:
h5arr = f.get_node(f.root, self.datapath)
odshape = datashape.from_numpy(h5arr.shape, h5arr.dtype)
return odshape
@property
def capabilities(self):
"""The capabilities for the HDF5 arrays."""
with tb.open_file(self.filename, mode='r') as f:
h5arr = f.get_node(f.root, self.datapath)
            appendable = isinstance(h5arr, (tb.EArray, tb.Table))
caps = Capabilities(
# HDF5 arrays can be updated
immutable = False,
# HDF5 arrays are concrete
deferred = False,
# HDF5 arrays are persistent
persistent = True,
# HDF5 arrays can be appended efficiently (EArrays and Tables)
appendable = appendable,
remote = False,
)
return caps
def dynd_arr(self):
        # Read the whole dataset into an in-memory dynd array
with tb.open_file(self.filename, mode='r') as f:
h5arr = f.get_node(f.root, self.datapath)
h5arr = nd.array(h5arr[:], dtype=h5arr.dtype)
return h5arr
def __array__(self):
with tb.open_file(self.filename, mode='r') as f:
h5arr = f.get_node(f.root, self.datapath)
h5arr = h5arr[:]
return h5arr
def __len__(self):
with tb.open_file(self.filename, mode='r') as f:
h5arr = f.get_node(f.root, self.datapath)
arrlen = len(h5arr)
return arrlen
def __getitem__(self, key):
with tb.open_file(self.filename, mode='r') as f:
h5arr = f.get_node(f.root, self.datapath)
# The returned arrays are temporary buffers,
# so must be flagged as readonly.
dyndarr = nd.asarray(h5arr[key], access='readonly')
return DyNDDataDescriptor(dyndarr)
def __setitem__(self, key, value):
# HDF5 arrays can be updated
with tb.open_file(self.filename, mode='a') as f:
h5arr = f.get_node(f.root, self.datapath)
h5arr[key] = value
def __iter__(self):
f = tb.open_file(self.filename, mode='r')
h5arr = f.get_node(f.root, self.datapath)
return hdf5_descriptor_iter(h5arr)
def append(self, values):
"""Append a list of values."""
shape, dtype = datashape.to_numpy(self.dshape)
values_arr = np.array(values, dtype=dtype)
shape_vals = values_arr.shape
if len(shape_vals) < len(shape):
shape_vals = (1,) + shape_vals
if len(shape_vals) != len(shape):
raise ValueError("shape of values is not compatible")
# Now, do the actual append
with tb.open_file(self.filename, mode='a') as f:
h5arr = f.get_node(f.root, self.datapath)
h5arr.append(values_arr.reshape(shape_vals))
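# Hedged usage sketch (added for illustration, not part of the original
# module): wrapping a small PyTables EArray.  The file name and node name
# below are invented.
def _example_hdf5_descriptor():
    with tb.open_file('example.h5', mode='w') as f:
        arr = f.create_earray(f.root, 'data', atom=tb.Float64Atom(),
                              shape=(0,))
        arr.append(np.arange(10.0))
    dd = HDF5DataDescriptor('example.h5', 'data')
    print(dd.dshape)           # e.g. dshape("10 * float64")
    dd.append([10.0, 11.0])    # EArrays and Tables support efficient appends
    return dd[0]               # a DyNDDataDescriptor wrapping the first element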
| {
"repo_name": "aaronmartin0303/blaze",
"path": "blaze/datadescriptor/hdf5_data_descriptor.py",
"copies": "8",
"size": "4450",
"license": "bsd-3-clause",
"hash": -1794576393004862000,
"line_mean": 36.3949579832,
"line_max": 75,
"alpha_frac": 0.6017977528,
"autogenerated": false,
"ratio": 3.641571194762684,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.002699004623645448,
"num_lines": 119
} |
from __future__ import absolute_import, division, print_function
import numpy as np
from dynd import nd
import datashape
from . import IDataDescriptor, Capabilities
import blz
from .dynd_data_descriptor import DyNDDataDescriptor
# WARNING! BLZ always return NumPy arrays when doing indexing
# operations. This is why DyNDDataDescriptor is used for returning
# the values here.
def blz_descriptor_iter(blzarr):
for i in range(len(blzarr)):
# BLZ doesn't have a convenient way to avoid collapsing
# to a scalar, this is a way to avoid that
el = np.array(blzarr[i], dtype=blzarr.dtype)
yield DyNDDataDescriptor(nd.array(el))
class BLZDataDescriptor(IDataDescriptor):
"""
A Blaze data descriptor which exposes a BLZ array.
"""
def __init__(self, obj):
# This is a low level interface, so strictly
# require a BLZ barray here
if not isinstance(obj, blz.barray):
raise TypeError(('object is not a blz array, '
'it has type %r') % type(obj))
self.blzarr = obj
@property
def dshape(self):
# This cannot be cached because the BLZ can change the dshape
obj = self.blzarr
return datashape.from_numpy(obj.shape, obj.dtype)
@property
def capabilities(self):
"""The capabilities for the BLZ arrays."""
return Capabilities(
# BLZ arrays can be updated
immutable = False,
# BLZ arrays are concrete
deferred = False,
# BLZ arrays can be either persistent of in-memory
persistent = self.blzarr.rootdir is not None,
# BLZ arrays can be appended efficiently
appendable = True,
remote = False,
)
def __array__(self):
return np.array(self.blzarr)
def __len__(self):
# BLZ arrays are never scalars
return len(self.blzarr)
def __getitem__(self, key):
blzarr = self.blzarr
# The returned arrays are temporary buffers,
# so must be flagged as readonly.
return DyNDDataDescriptor(nd.asarray(blzarr[key], access='readonly'))
def __setitem__(self, key, value):
# We decided that BLZ should be read and append only
raise NotImplementedError
def __iter__(self):
return blz_descriptor_iter(self.blzarr)
    # This is not part of the DataDescriptor interface itself, but can
    # be handy for other situations not requiring full compliance with
    # it.
def append(self, values):
"""Append a list of values."""
shape, dtype = datashape.to_numpy(self.dshape)
values_arr = np.array(values, dtype=dtype)
shape_vals = values_arr.shape
if len(shape_vals) < len(shape):
shape_vals = (1,) + shape_vals
if len(shape_vals) != len(shape):
raise ValueError("shape of values is not compatible")
# Now, do the actual append
self.blzarr.append(values_arr.reshape(shape_vals))
self.blzarr.flush()
def iterchunks(self, blen=None, start=None, stop=None):
"""Return chunks of size `blen` (in leading dimension).
Parameters
----------
blen : int
The length, in rows, of the buffers that are returned.
start : int
Where the iterator starts. The default is to start at the
beginning.
stop : int
Where the iterator stops. The default is to stop at the end.
Returns
-------
out : iterable
            This iterable returns buffers as NumPy arrays of
            homogeneous or structured types, depending on whether
            `self.blzarr` is a barray or a btable object.
See Also
--------
wherechunks
"""
# Return the iterable
return blz.iterblocks(self.blzarr, blen, start, stop)
def wherechunks(self, expression, blen=None, outfields=None, limit=None,
skip=0):
"""Return chunks fulfilling `expression`.
        Iterate over the rows that fulfill the `expression` condition
        on the Table `self.blzarr` in blocks of size `blen`.
Parameters
----------
expression : string or barray
A boolean Numexpr expression or a boolean barray.
blen : int
The length of the block that is returned. The default is the
chunklen, or for a btable, the minimum of the different column
chunklens.
outfields : list of strings or string
The list of column names that you want to get back in results.
Alternatively, it can be specified as a string such as 'f0 f1' or
'f0, f1'. If None, all the columns are returned.
limit : int
A maximum number of elements to return. The default is return
everything.
skip : int
An initial number of elements to skip. The default is 0.
Returns
-------
out : iterable
            This iterable returns buffers as NumPy arrays made of
            structured types (or homogeneous ones in case `outfields` is a
            single field).
See Also
--------
iterchunks
"""
# Return the iterable
return blz.whereblocks(self.blzarr, expression, blen, outfields,
limit, skip)
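# Hedged usage sketch (added for illustration, not part of the original
# module): wrapping an in-memory BLZ barray.  The sizes and values below are
# arbitrary.
def _example_blz_descriptor():
    arr = blz.barray(np.arange(6.0))
    dd = BLZDataDescriptor(arr)
    dd.append([6.0, 7.0])      # append-only; __setitem__ is not supported
    first = dd[0]              # a DyNDDataDescriptor over a readonly buffer
    return dd.dshape, np.array(dd), first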
| {
"repo_name": "XinSong/blaze",
"path": "blaze/datadescriptor/blz_data_descriptor.py",
"copies": "7",
"size": "5451",
"license": "bsd-3-clause",
"hash": 4993300301379388000,
"line_mean": 33.06875,
"line_max": 77,
"alpha_frac": 0.5997064759,
"autogenerated": false,
"ratio": 4.295508274231678,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0018850640923777744,
"num_lines": 160
} |
from __future__ import absolute_import, division, print_function
import numpy as np
from functools import partial, wraps
from math import factorial
from toolz import compose
from .core import _concatenate2, Array, atop, sqrt, elemwise
from .slicing import insert_many
from .numpy_compat import divide
from ..core import flatten
from . import chunk
from ..utils import ignoring, getargspec
def reduction(x, chunk, aggregate, axis=None, keepdims=None, dtype=None):
""" General version of reductions
>>> reduction(my_array, np.sum, np.sum, axis=0, keepdims=False) # doctest: +SKIP
"""
if axis is None:
axis = tuple(range(x.ndim))
if isinstance(axis, int):
axis = (axis,)
axis = tuple(i if i >= 0 else x.ndim + i for i in axis)
if dtype and 'dtype' in getargspec(chunk).args:
chunk = partial(chunk, dtype=dtype)
if dtype and 'dtype' in getargspec(aggregate).args:
aggregate = partial(aggregate, dtype=dtype)
chunk2 = partial(chunk, axis=axis, keepdims=True)
aggregate2 = partial(aggregate, axis=axis, keepdims=keepdims)
inds = tuple(range(x.ndim))
tmp = atop(chunk2, inds, x, inds)
inds2 = tuple(i for i in inds if i not in axis)
result = atop(compose(aggregate2, partial(_concatenate2, axes=axis)),
inds2, tmp, inds, dtype=dtype)
if keepdims:
dsk = result.dask.copy()
for k in flatten(result._keys()):
k2 = (k[0],) + insert_many(k[1:], axis, 0)
dsk[k2] = dsk.pop(k)
chunks = insert_many(result.chunks, axis, [1])
return Array(dsk, result.name, chunks=chunks, dtype=dtype)
else:
return result
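# Hedged usage sketch (added for illustration, not part of the original
# module): the array below is invented; ``np.sum`` is used both per block
# and to combine the block results, mirroring the doctest above.
def _example_reduction():
    import dask.array as da
    x = da.ones((8, 8), chunks=(4, 4))
    total = reduction(x, np.sum, np.sum, axis=0, keepdims=False, dtype='f8')
    return total.compute()     # -> eight values of 8.0 (same as x.sum(axis=0))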
@wraps(chunk.sum)
def sum(a, axis=None, dtype=None, keepdims=False):
if dtype is not None:
dt = dtype
elif a._dtype is not None:
dt = np.empty((1,), dtype=a._dtype).sum().dtype
else:
dt = None
return reduction(a, chunk.sum, chunk.sum, axis=axis, keepdims=keepdims,
dtype=dt)
@wraps(chunk.prod)
def prod(a, axis=None, dtype=None, keepdims=False):
if dtype is not None:
dt = dtype
elif a._dtype is not None:
dt = np.empty((1,), dtype=a._dtype).prod().dtype
else:
dt = None
return reduction(a, chunk.prod, chunk.prod, axis=axis, keepdims=keepdims,
dtype=dt)
@wraps(chunk.min)
def min(a, axis=None, keepdims=False):
return reduction(a, chunk.min, chunk.min, axis=axis, keepdims=keepdims,
dtype=a._dtype)
@wraps(chunk.max)
def max(a, axis=None, keepdims=False):
return reduction(a, chunk.max, chunk.max, axis=axis, keepdims=keepdims,
dtype=a._dtype)
@wraps(chunk.argmin)
def argmin(a, axis=None):
return arg_reduction(a, chunk.min, chunk.argmin, axis=axis, dtype='i8')
@wraps(chunk.nanargmin)
def nanargmin(a, axis=None):
return arg_reduction(a, chunk.nanmin, chunk.nanargmin, axis=axis,
dtype='i8')
@wraps(chunk.argmax)
def argmax(a, axis=None):
return arg_reduction(a, chunk.max, chunk.argmax, axis=axis, dtype='i8')
@wraps(chunk.nanargmax)
def nanargmax(a, axis=None):
return arg_reduction(a, chunk.nanmax, chunk.nanargmax, axis=axis,
dtype='i8')
@wraps(chunk.any)
def any(a, axis=None, keepdims=False):
return reduction(a, chunk.any, chunk.any, axis=axis, keepdims=keepdims,
dtype='bool')
@wraps(chunk.all)
def all(a, axis=None, keepdims=False):
return reduction(a, chunk.all, chunk.all, axis=axis, keepdims=keepdims,
dtype='bool')
@wraps(chunk.nansum)
def nansum(a, axis=None, dtype=None, keepdims=False):
if dtype is not None:
dt = dtype
elif a._dtype is not None:
dt = chunk.nansum(np.empty((1,), dtype=a._dtype)).dtype
else:
dt = None
return reduction(a, chunk.nansum, chunk.sum, axis=axis, keepdims=keepdims,
dtype=dt)
with ignoring(AttributeError):
@wraps(chunk.nanprod)
def nanprod(a, axis=None, dtype=None, keepdims=False):
if dtype is not None:
dt = dtype
elif a._dtype is not None:
dt = np.empty((1,), dtype=a._dtype).nanprod().dtype
else:
dt = None
return reduction(a, chunk.nanprod, chunk.prod, axis=axis,
keepdims=keepdims, dtype=dt)
@wraps(chunk.nanmin)
def nanmin(a, axis=None, keepdims=False):
return reduction(a, chunk.nanmin, chunk.min, axis=axis, keepdims=keepdims,
dtype=a._dtype)
@wraps(chunk.nanmax)
def nanmax(a, axis=None, keepdims=False):
return reduction(a, chunk.nanmax, chunk.max, axis=axis, keepdims=keepdims,
dtype=a._dtype)
def numel(x, **kwargs):
""" A reduction to count the number of elements """
return chunk.sum(np.ones_like(x), **kwargs)
def nannumel(x, **kwargs):
""" A reduction to count the number of elements """
return chunk.sum(~np.isnan(x), **kwargs)
def mean_chunk(x, sum=chunk.sum, numel=numel, dtype='f8', **kwargs):
n = numel(x, dtype=dtype, **kwargs)
total = sum(x, dtype=dtype, **kwargs)
result = np.empty(shape=n.shape,
dtype=[('total', total.dtype), ('n', n.dtype)])
result['n'] = n
result['total'] = total
return result
def mean_agg(pair, dtype='f8', **kwargs):
return divide(pair['total'].sum(dtype=dtype, **kwargs),
pair['n'].sum(dtype=dtype, **kwargs), dtype=dtype)
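# Hedged illustration (added, not part of the original module): each block
# contributes a structured (total, n) pair and the aggregate is simply
# sum(total) / sum(n), which is what ``mean_chunk`` and ``mean_agg`` compute.
def _example_mean_pair():
    x = np.array([1.0, 2.0, 3.0, 4.0])
    pair = mean_chunk(x)       # 0-d structured array: total == 10.0, n == 4.0
    return mean_agg(pair)      # == x.mean() == 2.5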
@wraps(chunk.mean)
def mean(a, axis=None, dtype=None, keepdims=False):
if dtype is not None:
dt = dtype
elif a._dtype is not None:
dt = np.mean(np.empty(shape=(1,), dtype=a._dtype)).dtype
else:
dt = None
return reduction(a, mean_chunk, mean_agg, axis=axis, keepdims=keepdims,
dtype=dt)
def nanmean(a, axis=None, dtype=None, keepdims=False):
if dtype is not None:
dt = dtype
elif a._dtype is not None:
dt = np.mean(np.empty(shape=(1,), dtype=a._dtype)).dtype
else:
dt = None
return reduction(a, partial(mean_chunk, sum=chunk.nansum, numel=nannumel),
mean_agg, axis=axis, keepdims=keepdims, dtype=dt)
with ignoring(AttributeError):
nanmean = wraps(chunk.nanmean)(nanmean)
def moment_chunk(A, order=2, sum=chunk.sum, numel=numel, dtype='f8', **kwargs):
total = sum(A, dtype=dtype, **kwargs)
n = numel(A, **kwargs)
u = total/n
M = np.empty(shape=n.shape + (order - 1,), dtype=dtype)
for i in range(2, order + 1):
M[..., i - 2] = sum((A - u)**i, dtype=dtype, **kwargs)
result = np.empty(shape=n.shape, dtype=[('total', total.dtype),
('n', n.dtype),
('M', M.dtype, (order-1,))])
result['total'] = total
result['n'] = n
result['M'] = M
return result
def moment_agg(data, order=2, ddof=0, dtype='f8', **kwargs):
totals = data['total']
ns = data['n']
Ms = data['M']
kwargs['dtype'] = dtype
# To properly handle ndarrays, the original dimensions need to be kept for
# part of the calculation.
keepdim_kw = kwargs.copy()
keepdim_kw['keepdims'] = True
n = ns.sum(**keepdim_kw)
mu = divide(totals.sum(**keepdim_kw), n, dtype=dtype)
inner_term = divide(totals, ns, dtype=dtype) - mu
result = Ms[..., -1].sum(**kwargs)
for k in range(1, order - 1):
coeff = factorial(order)/(factorial(k)*factorial(order - k))
result += coeff * (Ms[..., order - k - 2] * inner_term**k).sum(**kwargs)
result += (ns * inner_term**order).sum(**kwargs)
result = divide(result, (n.sum(**kwargs) - ddof), dtype=dtype)
return result
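# Note (added for clarity): ``moment_chunk``/``moment_agg`` combine per-block
# central sums using the binomial expansion
#
#     sum_i (x_i - mu)**p = sum_b sum_{k=0..p} C(p, k) * (u_b - mu)**k * M[p-k, b]
#
# where u_b is the mean of block b, M[j, b] is the sum over block b of
# (x_i - u_b)**j (so M[1, b] == 0 and M[0, b] == n_b), and the final division
# by ``n - ddof`` turns the combined central sum into the requested moment.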
def moment(a, order, axis=None, dtype=None, keepdims=False, ddof=0):
if not isinstance(order, int) or order < 2:
raise ValueError("Order must be an integer >= 2")
if dtype is not None:
dt = dtype
elif a._dtype is not None:
dt = np.var(np.ones(shape=(1,), dtype=a._dtype)).dtype
else:
dt = None
return reduction(a, partial(moment_chunk, order=order), partial(moment_agg,
order=order, ddof=ddof), axis=axis, keepdims=keepdims,
dtype=dt)
@wraps(chunk.var)
def var(a, axis=None, dtype=None, keepdims=False, ddof=0):
if dtype is not None:
dt = dtype
elif a._dtype is not None:
dt = np.var(np.ones(shape=(1,), dtype=a._dtype)).dtype
else:
dt = None
return reduction(a, moment_chunk, partial(moment_agg, ddof=ddof), axis=axis,
keepdims=keepdims, dtype=dt)
def nanvar(a, axis=None, dtype=None, keepdims=False, ddof=0):
if dtype is not None:
dt = dtype
elif a._dtype is not None:
dt = np.var(np.ones(shape=(1,), dtype=a._dtype)).dtype
else:
dt = None
return reduction(a, partial(moment_chunk, sum=chunk.nansum, numel=nannumel),
partial(moment_agg, ddof=ddof), axis=axis,
keepdims=keepdims, dtype=dt)
with ignoring(AttributeError):
nanvar = wraps(chunk.nanvar)(nanvar)
@wraps(chunk.std)
def std(a, axis=None, dtype=None, keepdims=False, ddof=0):
result = sqrt(a.var(axis=axis, dtype=dtype, keepdims=keepdims, ddof=ddof))
if dtype and dtype != result.dtype:
result = result.astype(dtype)
return result
def nanstd(a, axis=None, dtype=None, keepdims=False, ddof=0):
result = sqrt(nanvar(a, axis=axis, dtype=dtype, keepdims=keepdims, ddof=ddof))
if dtype and dtype != result.dtype:
result = result.astype(dtype)
return result
with ignoring(AttributeError):
nanstd = wraps(chunk.nanstd)(nanstd)
def vnorm(a, ord=None, axis=None, dtype=None, keepdims=False):
""" Vector norm
See np.linalg.norm
"""
if ord is None or ord == 'fro':
ord = 2
if ord == np.inf:
return max(abs(a), axis=axis, keepdims=keepdims)
elif ord == -np.inf:
return min(abs(a), axis=axis, keepdims=keepdims)
elif ord == 1:
return sum(abs(a), axis=axis, dtype=dtype, keepdims=keepdims)
elif ord % 2 == 0:
return sum(a**ord, axis=axis, dtype=dtype, keepdims=keepdims)**(1./ord)
else:
return sum(abs(a)**ord, axis=axis, dtype=dtype, keepdims=keepdims)**(1./ord)
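# Hedged usage sketch (added for illustration, not part of the original
# module): for a 1-d array ``vnorm`` agrees with ``np.linalg.norm``; the
# values below are invented.
def _example_vnorm():
    import dask.array as da
    x = da.from_array(np.array([3.0, 4.0]), chunks=2)
    return vnorm(x, ord=2).compute()    # -> 5.0 == np.linalg.norm([3.0, 4.0])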
def arg_aggregate(func, argfunc, dims, pairs):
"""
>>> pairs = [([4, 3, 5], [10, 11, 12]),
... ([3, 5, 1], [1, 2, 3])]
>>> arg_aggregate(np.min, np.argmin, (100, 100), pairs)
array([101, 11, 103])
"""
pairs = list(pairs)
mins, argmins = zip(*pairs)
mins = np.array(mins)
argmins = np.array(argmins)
args = argfunc(mins, axis=0)
offsets = np.add.accumulate([0] + list(dims)[:-1])
offsets = offsets.reshape((len(offsets),) + (1,) * (argmins.ndim - 1))
return np.choose(args, argmins + offsets)
def arg_reduction(a, func, argfunc, axis=0, dtype=None):
""" General version of argmin/argmax
>>> arg_reduction(my_array, np.min, axis=0) # doctest: +SKIP
"""
if not isinstance(axis, int):
raise ValueError("Must specify integer axis= keyword argument.\n"
"For example:\n"
" Before: x.argmin()\n"
" After: x.argmin(axis=0)\n")
if axis < 0:
axis = a.ndim + axis
def argreduce(x):
""" Get both min/max and argmin/argmax of each block """
return (func(x, axis=axis), argfunc(x, axis=axis))
a2 = elemwise(argreduce, a)
return atop(partial(arg_aggregate, func, argfunc, a.chunks[axis]),
[i for i in range(a.ndim) if i != axis],
a2, list(range(a.ndim)), dtype=dtype)
| {
"repo_name": "ssanderson/dask",
"path": "dask/array/reductions.py",
"copies": "10",
"size": "11734",
"license": "bsd-3-clause",
"hash": -6525226603789796000,
"line_mean": 30.5430107527,
"line_max": 85,
"alpha_frac": 0.5996250213,
"autogenerated": false,
"ratio": 3.3240793201133143,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8923704341413314,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import numpy as np
from functools import partial, wraps
from toolz import compose, curry
import inspect
from .core import _concatenate2, Array, atop, sqrt, elemwise
from .slicing import insert_many
from ..core import flatten
from . import chunk
from ..utils import ignoring
def reduction(x, chunk, aggregate, axis=None, keepdims=None, dtype=None):
""" General version of reductions
>>> reduction(my_array, np.sum, np.sum, axis=0, keepdims=False) # doctest: +SKIP
"""
if axis is None:
axis = tuple(range(x.ndim))
if isinstance(axis, int):
axis = (axis,)
axis = tuple(i if i >= 0 else x.ndim + i for i in axis)
if dtype and 'dtype' in inspect.getargspec(chunk).args:
chunk = partial(chunk, dtype=dtype)
if dtype and 'dtype' in inspect.getargspec(aggregate).args:
aggregate = partial(aggregate, dtype=dtype)
chunk2 = partial(chunk, axis=axis, keepdims=True)
aggregate2 = partial(aggregate, axis=axis, keepdims=keepdims)
inds = tuple(range(x.ndim))
tmp = atop(chunk2, inds, x, inds)
inds2 = tuple(i for i in inds if i not in axis)
result = atop(compose(aggregate2, curry(_concatenate2, axes=axis)),
inds2, tmp, inds, dtype=dtype)
if keepdims:
dsk = result.dask.copy()
for k in flatten(result._keys()):
k2 = (k[0],) + insert_many(k[1:], axis, 0)
dsk[k2] = dsk.pop(k)
chunks = insert_many(result.chunks, axis, [1])
return Array(dsk, result.name, chunks=chunks, dtype=dtype)
else:
return result
@wraps(chunk.sum)
def sum(a, axis=None, dtype=None, keepdims=False):
if dtype is not None:
dt = dtype
elif a._dtype is not None:
dt = np.empty((1,), dtype=a._dtype).sum().dtype
else:
dt = None
return reduction(a, chunk.sum, chunk.sum, axis=axis, keepdims=keepdims,
dtype=dt)
@wraps(chunk.prod)
def prod(a, axis=None, dtype=None, keepdims=False):
if dtype is not None:
dt = dtype
elif a._dtype is not None:
dt = np.empty((1,), dtype=a._dtype).prod().dtype
else:
dt = None
return reduction(a, chunk.prod, chunk.prod, axis=axis, keepdims=keepdims,
dtype=dt)
@wraps(chunk.min)
def min(a, axis=None, keepdims=False):
return reduction(a, chunk.min, chunk.min, axis=axis, keepdims=keepdims,
dtype=a._dtype)
@wraps(chunk.max)
def max(a, axis=None, keepdims=False):
return reduction(a, chunk.max, chunk.max, axis=axis, keepdims=keepdims,
dtype=a._dtype)
@wraps(chunk.argmin)
def argmin(a, axis=None):
return arg_reduction(a, chunk.min, chunk.argmin, axis=axis, dtype='i8')
@wraps(chunk.nanargmin)
def nanargmin(a, axis=None):
return arg_reduction(a, chunk.nanmin, chunk.nanargmin, axis=axis,
dtype='i8')
@wraps(chunk.argmax)
def argmax(a, axis=None):
return arg_reduction(a, chunk.max, chunk.argmax, axis=axis, dtype='i8')
@wraps(chunk.nanargmax)
def nanargmax(a, axis=None):
return arg_reduction(a, chunk.nanmax, chunk.nanargmax, axis=axis,
dtype='i8')
@wraps(chunk.any)
def any(a, axis=None, keepdims=False):
return reduction(a, chunk.any, chunk.any, axis=axis, keepdims=keepdims,
dtype='bool')
@wraps(chunk.all)
def all(a, axis=None, keepdims=False):
return reduction(a, chunk.all, chunk.all, axis=axis, keepdims=keepdims,
dtype='bool')
@wraps(chunk.nansum)
def nansum(a, axis=None, dtype=None, keepdims=False):
if dtype is not None:
dt = dtype
elif a._dtype is not None:
dt = chunk.nansum(np.empty((1,), dtype=a._dtype)).dtype
else:
dt = None
return reduction(a, chunk.nansum, chunk.sum, axis=axis, keepdims=keepdims,
dtype=dt)
with ignoring(AttributeError):
@wraps(chunk.nanprod)
def nanprod(a, axis=None, dtype=None, keepdims=False):
if dtype is not None:
dt = dtype
elif a._dtype is not None:
            dt = chunk.nanprod(np.empty((1,), dtype=a._dtype)).dtype
else:
dt = None
return reduction(a, chunk.nanprod, chunk.prod, axis=axis,
keepdims=keepdims, dtype=dt)
@wraps(chunk.nanmin)
def nanmin(a, axis=None, keepdims=False):
return reduction(a, chunk.nanmin, chunk.min, axis=axis, keepdims=keepdims,
dtype=a._dtype)
@wraps(chunk.nanmax)
def nanmax(a, axis=None, keepdims=False):
return reduction(a, chunk.nanmax, chunk.max, axis=axis, keepdims=keepdims,
dtype=a._dtype)
def numel(x, **kwargs):
""" A reduction to count the number of elements """
return chunk.sum(np.ones_like(x), **kwargs)
def nannumel(x, **kwargs):
""" A reduction to count the number of elements """
return chunk.sum(~np.isnan(x), **kwargs)
def mean_chunk(x, sum=chunk.sum, numel=numel, **kwargs):
n = numel(x, **kwargs)
total = sum(x, **kwargs)
result = np.empty(shape=n.shape,
dtype=[('total', total.dtype), ('n', n.dtype)])
result['n'] = n
result['total'] = total
return result
def mean_agg(pair, **kwargs):
return pair['total'].sum(**kwargs) / pair['n'].sum(**kwargs)
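# Illustrative check (not part of the original module): ``mean_chunk`` carries
# (total, n) per block so that ``mean_agg`` reproduces the exact global mean.
# ``_naive_mean`` is a hypothetical helper written only for demonstration.
def _naive_mean(blocks):
    """ Combine per-block (total, n) records the same way mean_agg does
    >>> blocks = [np.array([1.0, 2.0]), np.array([3.0, 4.0, 5.0])]
    >>> _naive_mean(blocks) == np.mean(np.concatenate(blocks))
    True
    """
    pairs = np.concatenate([mean_chunk(b, keepdims=True) for b in blocks])
    return mean_agg(pairs)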
@wraps(chunk.mean)
def mean(a, axis=None, dtype=None, keepdims=False):
if dtype is not None:
dt = dtype
elif a._dtype is not None:
dt = np.mean(np.empty(shape=(1,), dtype=a._dtype)).dtype
else:
dt = None
return reduction(a, mean_chunk, mean_agg, axis=axis, keepdims=keepdims,
dtype=dt)
def nanmean(a, axis=None, dtype=None, keepdims=False):
if dtype is not None:
dt = dtype
elif a._dtype is not None:
dt = np.mean(np.empty(shape=(1,), dtype=a._dtype)).dtype
else:
dt = None
return reduction(a, partial(mean_chunk, sum=chunk.nansum, numel=nannumel),
mean_agg, axis=axis, keepdims=keepdims, dtype=dt)
with ignoring(AttributeError):
nanmean = wraps(chunk.nanmean)(nanmean)
def var_chunk(A, sum=chunk.sum, numel=numel, dtype='f8', **kwargs):
n = numel(A, **kwargs)
x = sum(A, dtype=dtype, **kwargs)
x2 = sum(A**2, dtype=dtype, **kwargs)
result = np.empty(shape=n.shape, dtype=[('x', x.dtype),
('x2', x2.dtype),
('n', n.dtype)])
result['x'] = x
result['x2'] = x2
result['n'] = n
return result
def var_agg(A, ddof=None, **kwargs):
x = A['x'].sum(**kwargs)
x2 = A['x2'].sum(**kwargs)
n = A['n'].sum(**kwargs)
result = (x2 / n) - (x / n)**2
if ddof:
result = result * n / (n - ddof)
return result
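# Illustrative check (not part of the original module): ``var_chunk`` records
# (sum, sum of squares, count) per block and ``var_agg`` combines them as
# E[x**2] - E[x]**2 with an n / (n - ddof) correction, matching np.var.
# ``_naive_var`` is a hypothetical helper written only for demonstration.
def _naive_var(blocks, ddof=0):
    """ Combine per-block (x, x2, n) records the same way var_agg does
    >>> blocks = [np.array([1.0, 2.0, 3.0]), np.array([4.0, 5.0])]
    >>> np.allclose(_naive_var(blocks, ddof=1),
    ...             np.var(np.concatenate(blocks), ddof=1))
    True
    """
    parts = np.concatenate([var_chunk(b, keepdims=True) for b in blocks])
    return var_agg(parts, ddof=ddof)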
@wraps(chunk.var)
def var(a, axis=None, dtype=None, keepdims=False, ddof=0):
if dtype is not None:
dt = dtype
    elif a._dtype is not None:
dt = np.var(np.ones(shape=(1,), dtype=a._dtype)).dtype
else:
dt = None
return reduction(a, var_chunk, partial(var_agg, ddof=ddof), axis=axis,
keepdims=keepdims, dtype=dt)
def nanvar(a, axis=None, dtype=None, keepdims=False, ddof=0):
if dtype is not None:
dt = dtype
elif a._dtype is not None:
dt = np.var(np.ones(shape=(1,), dtype=a._dtype)).dtype
else:
dt = None
return reduction(a, partial(var_chunk, sum=chunk.nansum, numel=nannumel),
partial(var_agg, ddof=ddof), axis=axis, keepdims=keepdims,
dtype=dt)
with ignoring(AttributeError):
nanvar = wraps(chunk.nanvar)(nanvar)
@wraps(chunk.std)
def std(a, axis=None, dtype=None, keepdims=False, ddof=0):
return sqrt(a.var(axis=axis, dtype=dtype, keepdims=keepdims, ddof=ddof))
def nanstd(a, axis=None, dtype=None, keepdims=False, ddof=0):
return sqrt(nanvar(a, axis=axis, dtype=dtype, keepdims=keepdims, ddof=ddof))
with ignoring(AttributeError):
nanstd = wraps(chunk.nanstd)(nanstd)
def vnorm(a, ord=None, axis=None, dtype=None, keepdims=False):
""" Vector norm
See np.linalg.norm
"""
if ord is None or ord == 'fro':
ord = 2
if ord == np.inf:
return max(abs(a), axis=axis, keepdims=keepdims)
elif ord == -np.inf:
return min(abs(a), axis=axis, keepdims=keepdims)
elif ord == 1:
return sum(abs(a), axis=axis, dtype=dtype, keepdims=keepdims)
elif ord % 2 == 0:
return sum(a**ord, axis=axis, dtype=dtype, keepdims=keepdims)**(1./ord)
else:
return sum(abs(a)**ord, axis=axis, dtype=dtype, keepdims=keepdims)**(1./ord)
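# Illustrative check (not part of the original module): for a positive even
# ``ord`` the branch above drops the abs() because even powers discard sign,
# reducing to sum(x**ord)**(1/ord), which matches np.linalg.norm on plain
# NumPy data.  ``_np_vnorm`` is a hypothetical helper for demonstration only.
def _np_vnorm(x, ord=2):
    """ NumPy-only restatement of the even-``ord`` branch of ``vnorm``
    >>> float(_np_vnorm(np.array([3.0, -4.0]), ord=2))
    5.0
    >>> np.allclose(_np_vnorm(np.array([3.0, -4.0])), np.linalg.norm([3.0, -4.0]))
    True
    """
    return np.sum(x ** ord) ** (1.0 / ord)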
def arg_aggregate(func, argfunc, dims, pairs):
"""
>>> pairs = [([4, 3, 5], [10, 11, 12]),
... ([3, 5, 1], [1, 2, 3])]
>>> arg_aggregate(np.min, np.argmin, (100, 100), pairs)
array([101, 11, 103])
"""
pairs = list(pairs)
mins, argmins = zip(*pairs)
mins = np.array(mins)
argmins = np.array(argmins)
args = argfunc(mins, axis=0)
offsets = np.add.accumulate([0] + list(dims)[:-1])
offsets = offsets.reshape((len(offsets),) + (1,) * (argmins.ndim - 1))
return np.choose(args, argmins + offsets)
def arg_reduction(a, func, argfunc, axis=0, dtype=None):
""" General version of argmin/argmax
    >>> arg_reduction(my_array, np.min, np.argmin, axis=0) # doctest: +SKIP
"""
if not isinstance(axis, int):
raise ValueError("Must specify integer axis= keyword argument.\n"
"For example:\n"
" Before: x.argmin()\n"
" After: x.argmin(axis=0)\n")
if axis < 0:
axis = a.ndim + axis
def argreduce(x):
""" Get both min/max and argmin/argmax of each block """
return (func(x, axis=axis), argfunc(x, axis=axis))
a2 = elemwise(argreduce, a)
return atop(partial(arg_aggregate, func, argfunc, a.chunks[axis]),
[i for i in range(a.ndim) if i != axis],
a2, list(range(a.ndim)), dtype=dtype)
| {
"repo_name": "marianotepper/dask",
"path": "dask/array/reductions.py",
"copies": "2",
"size": "10067",
"license": "bsd-3-clause",
"hash": -4484792602106028500,
"line_mean": 29.5987841945,
"line_max": 85,
"alpha_frac": 0.5968014304,
"autogenerated": false,
"ratio": 3.272756827048114,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9864405094059487,
"avg_score": 0.0010306326777254425,
"num_lines": 329
} |
from __future__ import absolute_import, division, print_function
import numpy as np
from functools import partial, wraps
from toolz import compose, curry
from .core import _concatenate2, Array, atop, names, sqrt, elemwise
from .slicing import insert_many
from ..core import flatten
from . import chunk
from ..utils import ignoring
def reduction(x, chunk, aggregate, axis=None, keepdims=None, dtype=None):
""" General version of reductions
>>> reduction(my_array, np.sum, np.sum, axis=0, keepdims=False) # doctest: +SKIP
"""
if axis is None:
axis = tuple(range(x.ndim))
if isinstance(axis, int):
axis = (axis,)
chunk2 = partial(chunk, axis=axis, keepdims=True)
aggregate2 = partial(aggregate, axis=axis, keepdims=keepdims)
inds = tuple(range(x.ndim))
tmp = atop(chunk2, next(names), inds, x, inds)
inds2 = tuple(i for i in inds if i not in axis)
result = atop(compose(aggregate2, curry(_concatenate2, axes=axis)),
next(names), inds2, tmp, inds, dtype=dtype)
if keepdims:
dsk = result.dask.copy()
for k in flatten(result._keys()):
k2 = (k[0],) + insert_many(k[1:], axis, 0)
dsk[k2] = dsk.pop(k)
chunks = insert_many(result.chunks, axis, [1])
return Array(dsk, result.name, chunks=chunks, dtype=dtype)
else:
return result
@wraps(chunk.sum)
def sum(a, axis=None, keepdims=False):
if a._dtype is not None:
dt = np.empty((1,), dtype=a._dtype).sum().dtype
else:
dt = None
return reduction(a, chunk.sum, chunk.sum, axis=axis, keepdims=keepdims,
dtype=dt)
@wraps(chunk.prod)
def prod(a, axis=None, keepdims=False):
if a._dtype is not None:
dt = np.empty((1,), dtype=a._dtype).prod().dtype
else:
dt = None
return reduction(a, chunk.prod, chunk.prod, axis=axis, keepdims=keepdims,
dtype=dt)
@wraps(chunk.min)
def min(a, axis=None, keepdims=False):
return reduction(a, chunk.min, chunk.min, axis=axis, keepdims=keepdims,
dtype=a._dtype)
@wraps(chunk.max)
def max(a, axis=None, keepdims=False):
return reduction(a, chunk.max, chunk.max, axis=axis, keepdims=keepdims,
dtype=a._dtype)
@wraps(chunk.argmin)
def argmin(a, axis=None):
return arg_reduction(a, chunk.min, chunk.argmin, axis=axis, dtype='i8')
@wraps(chunk.nanargmin)
def nanargmin(a, axis=None):
return arg_reduction(a, chunk.nanmin, chunk.nanargmin, axis=axis,
dtype='i8')
@wraps(chunk.argmax)
def argmax(a, axis=None):
return arg_reduction(a, chunk.max, chunk.argmax, axis=axis, dtype='i8')
@wraps(chunk.nanargmax)
def nanargmax(a, axis=None):
return arg_reduction(a, chunk.nanmax, chunk.nanargmax, axis=axis,
dtype='i8')
@wraps(chunk.any)
def any(a, axis=None, keepdims=False):
return reduction(a, chunk.any, chunk.any, axis=axis, keepdims=keepdims,
dtype='bool')
@wraps(chunk.all)
def all(a, axis=None, keepdims=False):
return reduction(a, chunk.all, chunk.all, axis=axis, keepdims=keepdims,
dtype='bool')
@wraps(chunk.nansum)
def nansum(a, axis=None, keepdims=False):
if a._dtype is not None:
dt = chunk.nansum(np.empty((1,), dtype=a._dtype)).dtype
else:
dt = None
return reduction(a, chunk.nansum, chunk.sum, axis=axis, keepdims=keepdims,
dtype=dt)
with ignoring(AttributeError):
@wraps(chunk.nanprod)
def nanprod(a, axis=None, keepdims=False):
if a._dtype is not None:
            dt = chunk.nanprod(np.empty((1,), dtype=a._dtype)).dtype
else:
dt = None
return reduction(a, chunk.nanprod, chunk.prod, axis=axis,
keepdims=keepdims, dtype=dt)
@wraps(chunk.nanmin)
def nanmin(a, axis=None, keepdims=False):
return reduction(a, chunk.nanmin, chunk.min, axis=axis, keepdims=keepdims,
dtype=a._dtype)
@wraps(chunk.nanmax)
def nanmax(a, axis=None, keepdims=False):
return reduction(a, chunk.nanmax, chunk.max, axis=axis, keepdims=keepdims,
dtype=a._dtype)
def numel(x, **kwargs):
""" A reduction to count the number of elements """
return chunk.sum(np.ones_like(x), **kwargs)
def nannumel(x, **kwargs):
""" A reduction to count the number of elements """
return chunk.sum(~np.isnan(x), **kwargs)
def mean_chunk(x, sum=chunk.sum, numel=numel, **kwargs):
n = numel(x, **kwargs)
total = sum(x, **kwargs)
result = np.empty(shape=n.shape,
dtype=[('total', total.dtype), ('n', n.dtype)])
result['n'] = n
result['total'] = total
return result
def mean_agg(pair, **kwargs):
return pair['total'].sum(**kwargs) / pair['n'].sum(**kwargs)
@wraps(chunk.mean)
def mean(a, axis=None, keepdims=False):
if a._dtype is not None:
dt = np.mean(np.empty(shape=(1,), dtype=a._dtype)).dtype
else:
dt = None
return reduction(a, mean_chunk, mean_agg, axis=axis, keepdims=keepdims,
dtype=dt)
def nanmean(a, axis=None, keepdims=False):
if a._dtype is not None:
dt = np.mean(np.empty(shape=(1,), dtype=a._dtype)).dtype
else:
dt = None
return reduction(a, partial(mean_chunk, sum=chunk.nansum, numel=nannumel),
mean_agg, axis=axis, keepdims=keepdims, dtype=dt)
with ignoring(AttributeError):
nanmean = wraps(chunk.nanmean)(nanmean)
def var_chunk(A, sum=chunk.sum, numel=numel, **kwargs):
n = numel(A, **kwargs)
x = sum(A, dtype='f8', **kwargs)
x2 = sum(A**2, dtype='f8', **kwargs)
result = np.empty(shape=n.shape, dtype=[('x', x.dtype),
('x2', x2.dtype),
('n', n.dtype)])
result['x'] = x
result['x2'] = x2
result['n'] = n
return result
def var_agg(A, ddof=None, **kwargs):
x = A['x'].sum(**kwargs)
x2 = A['x2'].sum(**kwargs)
n = A['n'].sum(**kwargs)
result = (x2 / n) - (x / n)**2
if ddof:
result = result * n / (n - ddof)
return result
@wraps(chunk.var)
def var(a, axis=None, keepdims=False, ddof=0):
if a._dtype is not None:
dt = np.var(np.empty(shape=(1,), dtype=a._dtype)).dtype
else:
dt = None
return reduction(a, var_chunk, partial(var_agg, ddof=ddof), axis=axis,
keepdims=keepdims, dtype=dt)
def nanvar(a, axis=None, keepdims=False, ddof=0):
if a._dtype is not None:
dt = np.var(np.empty(shape=(1,), dtype=a._dtype)).dtype
else:
dt = None
return reduction(a, partial(var_chunk, sum=chunk.nansum, numel=nannumel),
partial(var_agg, ddof=ddof), axis=axis, keepdims=keepdims,
dtype=dt)
with ignoring(AttributeError):
nanvar = wraps(chunk.nanvar)(nanvar)
@wraps(chunk.std)
def std(a, axis=None, keepdims=False, ddof=0):
return sqrt(a.var(axis=axis, keepdims=keepdims, ddof=ddof))
def nanstd(a, axis=None, keepdims=False, ddof=0):
return sqrt(nanvar(a, axis=axis, keepdims=keepdims, ddof=ddof))
with ignoring(AttributeError):
nanstd = wraps(chunk.nanstd)(nanstd)
def vnorm(a, ord=None, axis=None, keepdims=False):
""" Vector norm
See np.linalg.norm
"""
if ord is None or ord == 'fro':
ord = 2
if ord == np.inf:
return max(abs(a), axis=axis, keepdims=keepdims)
elif ord == -np.inf:
return min(abs(a), axis=axis, keepdims=keepdims)
elif ord == 1:
return sum(abs(a), axis=axis, keepdims=keepdims)
elif ord % 2 == 0:
return sum(a**ord, axis=axis, keepdims=keepdims)**(1./ord)
else:
return sum(abs(a)**ord, axis=axis, keepdims=keepdims)**(1./ord)
def arg_aggregate(func, argfunc, dims, pairs):
"""
>>> pairs = [([4, 3, 5], [10, 11, 12]),
... ([3, 5, 1], [1, 2, 3])]
>>> arg_aggregate(np.min, np.argmin, (100, 100), pairs)
array([101, 11, 103])
"""
pairs = list(pairs)
mins, argmins = zip(*pairs)
mins = np.array(mins)
argmins = np.array(argmins)
args = argfunc(mins, axis=0)
offsets = np.add.accumulate([0] + list(dims)[:-1])
offsets = offsets.reshape((len(offsets),) + (1,) * (argmins.ndim - 1))
return np.choose(args, argmins + offsets)
def arg_reduction(a, func, argfunc, axis=0, dtype=None):
""" General version of argmin/argmax
    >>> arg_reduction(my_array, np.min, np.argmin, axis=0) # doctest: +SKIP
"""
if not isinstance(axis, int):
raise ValueError("Must specify integer axis= keyword argument.\n"
"For example:\n"
" Before: x.argmin()\n"
" After: x.argmin(axis=0)\n")
def argreduce(x):
""" Get both min/max and argmin/argmax of each block """
return (func(x, axis=axis), argfunc(x, axis=axis))
a2 = elemwise(argreduce, a)
return atop(partial(arg_aggregate, func, argfunc, a.chunks[axis]),
next(names), [i for i in range(a.ndim) if i != axis],
a2, list(range(a.ndim)), dtype=dtype)
| {
"repo_name": "esc/dask",
"path": "dask/array/reductions.py",
"copies": "2",
"size": "9176",
"license": "bsd-3-clause",
"hash": 5805616984707362000,
"line_mean": 29.5866666667,
"line_max": 85,
"alpha_frac": 0.5953574542,
"autogenerated": false,
"ratio": 3.230985915492958,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9821093906925008,
"avg_score": 0.0010498925535899065,
"num_lines": 300
} |
from __future__ import absolute_import, division, print_function
import numpy as np
from functools import partial, wraps
from toolz import compose, curry
from .core import (_concatenate2, insert_many, Array, atop, names, sqrt,
elemwise)
from ..core import flatten
from ..utils import ignoring
def reduction(x, chunk, aggregate, axis=None, keepdims=None):
""" General version of reductions
>>> reduction(my_array, np.sum, np.sum, axis=0, keepdims=False) # doctest: +SKIP
"""
if axis is None:
axis = tuple(range(x.ndim))
if isinstance(axis, int):
axis = (axis,)
chunk2 = partial(chunk, axis=axis, keepdims=True)
aggregate2 = partial(aggregate, axis=axis, keepdims=keepdims)
inds = tuple(range(x.ndim))
tmp = atop(chunk2, next(names), inds, x, inds)
inds2 = tuple(i for i in inds if i not in axis)
result = atop(compose(aggregate2, curry(_concatenate2, axes=axis)),
next(names), inds2, tmp, inds)
if keepdims:
dsk = result.dask.copy()
for k in flatten(result._keys()):
k2 = (k[0],) + insert_many(k[1:], axis, 0)
dsk[k2] = dsk.pop(k)
blockdims = insert_many(result.blockdims, axis, [1])
return Array(dsk, result.name, blockdims=blockdims)
else:
return result
@wraps(np.sum)
def sum(a, axis=None, keepdims=False):
return reduction(a, np.sum, np.sum, axis=axis, keepdims=keepdims)
@wraps(np.prod)
def prod(a, axis=None, keepdims=False):
return reduction(a, np.prod, np.prod, axis=axis, keepdims=keepdims)
@wraps(np.min)
def min(a, axis=None, keepdims=False):
return reduction(a, np.min, np.min, axis=axis, keepdims=keepdims)
@wraps(np.max)
def max(a, axis=None, keepdims=False):
return reduction(a, np.max, np.max, axis=axis, keepdims=keepdims)
@wraps(np.argmin)
def argmin(a, axis=None):
return arg_reduction(a, np.min, np.argmin, axis=axis)
@wraps(np.nanargmin)
def nanargmin(a, axis=None):
return arg_reduction(a, np.nanmin, np.nanargmin, axis=axis)
@wraps(np.argmax)
def argmax(a, axis=None):
return arg_reduction(a, np.max, np.argmax, axis=axis)
@wraps(np.nanargmax)
def nanargmax(a, axis=None):
return arg_reduction(a, np.nanmax, np.nanargmax, axis=axis)
@wraps(np.any)
def any(a, axis=None, keepdims=False):
return reduction(a, np.any, np.any, axis=axis, keepdims=keepdims)
@wraps(np.all)
def all(a, axis=None, keepdims=False):
return reduction(a, np.all, np.all, axis=axis, keepdims=keepdims)
@wraps(np.nansum)
def nansum(a, axis=None, keepdims=False):
return reduction(a, np.nansum, np.sum, axis=axis, keepdims=keepdims)
with ignoring(AttributeError):
@wraps(np.nanprod)
def nanprod(a, axis=None, keepdims=False):
return reduction(a, np.nanprod, np.prod, axis=axis, keepdims=keepdims)
@wraps(np.nanmin)
def nanmin(a, axis=None, keepdims=False):
return reduction(a, np.nanmin, np.min, axis=axis, keepdims=keepdims)
@wraps(np.nanmax)
def nanmax(a, axis=None, keepdims=False):
return reduction(a, np.nanmax, np.max, axis=axis, keepdims=keepdims)
def numel(x, **kwargs):
""" A reduction to count the number of elements """
return np.sum(np.ones_like(x), **kwargs)
def nannumel(x, **kwargs):
""" A reduction to count the number of elements """
return np.sum(~np.isnan(x), **kwargs)
def mean_chunk(x, sum=np.sum, numel=numel, **kwargs):
n = numel(x, **kwargs)
total = sum(x, **kwargs)
result = np.empty(shape=n.shape,
dtype=[('total', total.dtype), ('n', n.dtype)])
result['n'] = n
result['total'] = total
return result
def mean_agg(pair, **kwargs):
return pair['total'].sum(**kwargs) / pair['n'].sum(**kwargs)
@wraps(np.mean)
def mean(a, axis=None, keepdims=False):
return reduction(a, mean_chunk, mean_agg, axis=axis, keepdims=keepdims)
@wraps(np.nanmean)
def nanmean(a, axis=None, keepdims=False):
return reduction(a, partial(mean_chunk, sum=np.nansum, numel=nannumel),
mean_agg, axis=axis, keepdims=keepdims)
def var_chunk(A, sum=np.sum, numel=numel, **kwargs):
n = numel(A, **kwargs)
x = sum(A, dtype='f8', **kwargs)
x2 = sum(A**2, dtype='f8', **kwargs)
result = np.empty(shape=n.shape, dtype=[('x', x.dtype),
('x2', x2.dtype),
('n', n.dtype)])
result['x'] = x
result['x2'] = x2
result['n'] = n
return result
def var_agg(A, ddof=None, **kwargs):
x = A['x'].sum(**kwargs)
x2 = A['x2'].sum(**kwargs)
n = A['n'].sum(**kwargs)
result = (x2 / n) - (x / n)**2
if ddof:
result = result * n / (n - ddof)
return result
@wraps(np.var)
def var(a, axis=None, keepdims=False, ddof=0):
return reduction(a, var_chunk, partial(var_agg, ddof=ddof), axis=axis, keepdims=keepdims)
@wraps(np.nanvar)
def nanvar(a, axis=None, keepdims=False, ddof=0):
return reduction(a, partial(var_chunk, sum=np.nansum, numel=nannumel),
partial(var_agg, ddof=ddof), axis=axis, keepdims=keepdims)
@wraps(np.std)
def std(a, axis=None, keepdims=False, ddof=0):
return sqrt(a.var(axis=axis, keepdims=keepdims, ddof=ddof))
@wraps(np.nanstd)
def nanstd(a, axis=None, keepdims=False, ddof=0):
return sqrt(nanvar(a, axis=axis, keepdims=keepdims, ddof=ddof))
def vnorm(a, ord=None, axis=None, keepdims=False):
""" Vector norm
See np.linalg.norm
"""
if ord is None or ord == 'fro':
ord = 2
if ord == np.inf:
return max(abs(a), axis=axis, keepdims=keepdims)
elif ord == -np.inf:
return min(abs(a), axis=axis, keepdims=keepdims)
elif ord == 1:
return sum(abs(a), axis=axis, keepdims=keepdims)
elif ord % 2 == 0:
return sum(a**ord, axis=axis, keepdims=keepdims)**(1./ord)
else:
return sum(abs(a)**ord, axis=axis, keepdims=keepdims)**(1./ord)
def arg_aggregate(func, argfunc, dims, pairs):
"""
>>> pairs = [([4, 3, 5], [10, 11, 12]),
... ([3, 5, 1], [1, 2, 3])]
>>> arg_aggregate(np.min, np.argmin, (100, 100), pairs)
array([101, 11, 103])
"""
pairs = list(pairs)
mins, argmins = zip(*pairs)
mins = np.array(mins)
argmins = np.array(argmins)
args = argfunc(mins, axis=0)
offsets = np.add.accumulate([0] + list(dims)[:-1])
offsets = offsets.reshape((len(offsets),) + (1,) * (argmins.ndim - 1))
return np.choose(args, argmins + offsets)
def arg_reduction(a, func, argfunc, axis=0):
""" General version of argmin/argmax
    >>> arg_reduction(my_array, np.min, np.argmin, axis=0) # doctest: +SKIP
"""
if not isinstance(axis, int):
raise ValueError("Must specify integer axis= keyword argument.\n"
"For example:\n"
" Before: x.argmin()\n"
" After: x.argmin(axis=0)\n")
def argreduce(x):
""" Get both min/max and argmin/argmax of each block """
return (func(x, axis=axis), argfunc(x, axis=axis))
a2 = elemwise(argreduce, a)
return atop(partial(arg_aggregate, func, argfunc, a.blockdims[axis]),
next(names), [i for i in range(a.ndim) if i != axis],
a2, list(range(a.ndim)))
| {
"repo_name": "PeterDSteinberg/dask",
"path": "dask/array/reductions.py",
"copies": "1",
"size": "7266",
"license": "bsd-3-clause",
"hash": -9189050662074939000,
"line_mean": 28.2983870968,
"line_max": 93,
"alpha_frac": 0.6169832095,
"autogenerated": false,
"ratio": 3.036356038445466,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9147001214445548,
"avg_score": 0.0012676066999834429,
"num_lines": 248
} |
from __future__ import (absolute_import, division, print_function)
import numpy as np
from .harmonics import ut_E
from .utilities import Bunch
from ._time_conversion import _normalize_time
def reconstruct(t, coef, epoch='python', verbose=True, **opts):
"""
Reconstruct a tidal signal.
Parameters
----------
t : array_like
Time in days since `epoch`.
coef : `Bunch`
Data structure returned by `utide.solve`
epoch : {string, `datetime.date`, `datetime.datetime`}, optional
Valid strings are 'python' (default); 'matlab' if `t` is
an array of Matlab datenums; or an arbitrary date in the
form 'YYYY-MM-DD'. The default corresponds to the Python
standard library `datetime` proleptic Gregorian calendar,
starting with 1 on January 1 of year 1.
verbose : {True, False}, optional
        True to enable output messages (default). False turns off all
messages.
Returns
-------
tide : `Bunch`
        A scalar time series is returned as `tide.h`; a vector
        series as `tide.u` and `tide.v`.
"""
out = Bunch()
u, v = _reconstr1(t, coef, epoch=epoch, verbose=verbose, **opts)
if coef['aux']['opt']['twodim']:
out.u, out.v = u, v
else:
out.h = u
return out
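# Illustrative usage sketch (not part of the original module); the exact
# ``utide.solve`` call below is an assumption and depends on the data at hand:
#
#     coef = utide.solve(t, u, v, lat=lat)     # fit constituents (2-D case)
#     tide = utide.reconstruct(t, coef)        # rebuild the tidal signal
#     tide.u, tide.v                           # or tide.h for scalar input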
def _reconstr1(tin, coef, **opts):
# Parse inputs and options.
t, goodmask, opt = _rcninit(tin, **opts)
if opt['RunTimeDisp']:
print('reconstruct:', end='')
# Determine constituents to include.
# if ~isempty(opt.cnstit)
# if not np.empty(opt['cnstit']):
if opt['cnstit']:
# [~,ind] = ismember(cellstr(opt.cnstit),coef.name);
# opt['cnstit'] in coef['name']
ind = np.where(opt['cnstit'] == coef['name'])
# if ~isequal(length(ind),length(cellstr(opt.cnstit)))
# error(['reconstruct: one or more of input constituents Cnstit '...
# 'not found in coef.name']);
else:
ind = np.arange(len(coef['aux']['frq']))
if coef['aux']['opt']['twodim']:
SNR = ((coef['Lsmaj']**2 + coef['Lsmin']**2) /
((coef['Lsmaj_ci']/1.96)**2 + (coef['Lsmin_ci']/1.96)**2))
PE = sum(coef['Lsmaj']**2 + coef['Lsmin']**2)
PE = 100*(coef['Lsmaj']**2 + coef['Lsmin']**2)/PE
else:
SNR = (coef['A']**2)/((coef['A_ci']/1.96)**2)
PE = 100*coef['A']**2/sum(coef['A']**2)
# ind = ind[SNR[ind]>=opt['minsnr'] & PE[ind]>=opt['minpe']]
ind = np.where(np.logical_and(SNR[ind] >= opt['minsnr'],
PE[ind] >= opt['minpe']))[0]
# Complex coefficients.
rpd = np.pi/180
if coef['aux']['opt']['twodim']:
ap = 0.5 * ((coef['Lsmaj'][ind] + coef['Lsmin'][ind]) *
np.exp(1j*(coef['theta'][ind] - coef['g'][ind]) * rpd))
am = 0.5 * ((coef['Lsmaj'][ind] - coef['Lsmin'][ind]) *
np.exp(1j*(coef['theta'][ind] + coef['g'][ind]) * rpd))
else:
ap = 0.5 * coef['A'][ind] * np.exp(-1j*coef['g'][ind] * rpd)
am = np.conj(ap)
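    # Illustrative note (not part of the original source): ``ap`` and ``am`` are
    # the amplitudes of the counter-rotating complex exponential components; in
    # the scalar case ``am = conj(ap)``, so the fit computed below,
    # E.dot(ap) + conj(E).dot(am), equals twice the real part of E.dot(ap) and
    # is therefore real-valued.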
# Exponentials.
ngflgs = [coef['aux']['opt']['nodsatlint'],
coef['aux']['opt']['nodsatnone'],
coef['aux']['opt']['gwchlint'],
coef['aux']['opt']['gwchnone']]
if opt['RunTimeDisp']:
print('prep/calcs ... ', end='')
E = ut_E(t,
coef['aux']['reftime'], coef['aux']['frq'][ind],
coef['aux']['lind'][ind], coef['aux']['lat'], ngflgs,
coef['aux']['opt']['prefilt'])
# Fit.
# fit = E*ap + np.conj(E)*am
fit = np.dot(E, ap) + np.dot(np.conj(E), am)
# Mean (& trend).
u = np.nan * np.ones(tin.shape)
whr = goodmask
if coef['aux']['opt']['twodim']:
v = np.nan * np.ones(tin.shape)
if coef['aux']['opt']['notrend']:
u[whr] = np.real(fit) + coef['umean']
v[whr] = np.imag(fit) + coef['vmean']
else:
u[whr] = np.real(fit) + coef['umean']
u[whr] += coef['uslope'] * (t-coef['aux']['reftime'])
v[whr] = np.imag(fit) + coef['vmean']
v[whr] += coef['vslope'] * (t-coef['aux']['reftime'])
else:
if coef['aux']['opt']['notrend']:
u[whr] = np.real(fit) + coef['mean']
else:
u[whr] = np.real(fit) + coef['mean']
u[whr] += coef['slope'] * (t-coef['aux']['reftime'])
v = None
if opt['RunTimeDisp']:
print('done.')
return u, v
def _rcninit(tin, **opts):
t = tin[:]
# Supporting only 1-D arrays for now; we can add "group"
# support later.
if tin.ndim != 1:
raise ValueError("t must be a 1-D array")
# Step 0: apply epoch to time.
t = _normalize_time(tin, opts['epoch'])
# Step 1: remove invalid times from tin
t = np.ma.masked_invalid(t)
goodmask = ~np.ma.getmaskarray(t)
t = t.compressed()
opt = {}
opt['cnstit'] = False
opt['minsnr'] = 2
opt['minpe'] = 0
for key, item in opts.items():
# Be backward compatible with the MATLAB package syntax.
if key == 'verbose':
opt['RunTimeDisp'] = item
try:
opt[key] = item
except KeyError:
print('reconstruct: unrecognized input: {0}'.format(key))
return t, goodmask, opt
| {
"repo_name": "efiring/UTide",
"path": "utide/_reconstruct.py",
"copies": "1",
"size": "5372",
"license": "mit",
"hash": -7137091002480009000,
"line_mean": 29.6971428571,
"line_max": 79,
"alpha_frac": 0.5147058824,
"autogenerated": false,
"ratio": 3.203339296362552,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.42180451787625517,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import numpy as np
from itertools import chain
import h5py
from dynd import nd
import datashape
from datashape import var, dshape
from toolz.curried import pipe, concat, map, partial
from ..dispatch import dispatch
from .core import DataDescriptor
from ..utils import partition_all, get
from ..compatibility import _strtypes, unicode
from ..resource import resource
h5py_attributes = ['chunks', 'compression', 'compression_opts', 'dtype',
'fillvalue', 'fletcher32', 'maxshape', 'shape']
__all__ = ['HDF5', 'discover']
@dispatch(h5py.Dataset)
def discover(d):
s = str(datashape.from_numpy(d.shape, d.dtype))
return dshape(s.replace('object', 'string'))
def varlen_dtype(dt):
""" Inject variable length string element for 'O' """
if "'O'" not in str(dt):
return dt
varlen = h5py.special_dtype(vlen=unicode)
return np.dtype(eval(str(dt).replace("'O'", 'varlen')))
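# Illustrative note (not part of the original module): for a record dtype such
# as np.dtype([('name', 'O'), ('amount', '<i8')]), the helper above returns the
# same layout with the object field replaced by h5py.special_dtype(vlen=unicode),
# which h5py can store as a variable-length string on disk.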
class HDF5(DataDescriptor):
"""
A Blaze data descriptor which exposes an HDF5 file.
Parameters
----------
path: string
Location of hdf5 file on disk
datapath: string
Location of array dataset in hdf5
dshape: string or Datashape
a datashape describing the data
schema: string or DataShape
datashape describing one row of data
**kwargs:
Options to send to h5py - see h5py.File.create_dataset for options
"""
def __init__(self, path, datapath,
schema=None, dshape=None, **kwargs):
self.path = path
self.datapath = datapath
if isinstance(schema, _strtypes):
schema = datashape.dshape(schema)
if isinstance(dshape, _strtypes):
dshape = datashape.dshape(dshape)
if schema and not dshape:
dshape = var * datashape.dshape(schema)
if not dshape:
with h5py.File(path, 'r') as f:
dset = f.get(datapath)
if dset:
dshape = discover(dset)
else:
raise ValueError("No datashape given or found. "
"Please specify dshape or schema keyword args")
# TODO: provide sane defaults for kwargs
# Notably chunks and maxshape
shape = dshape.shape
dtype = varlen_dtype(dshape[-1].to_numpy_dtype())
if shape[0] == datashape.Var():
kwargs['chunks'] = True
kwargs['maxshape'] = kwargs.get('maxshape', (None,) + shape[1:])
shape = (0,) + tuple(map(int, shape[1:]))
with h5py.File(path) as f:
dset = f.get(datapath)
if not dset:
f.create_dataset(datapath, shape, dtype=dtype, **kwargs)
attributes = self.attributes()
if attributes['chunks']:
dshape = var * dshape.subshape[0]
self._dshape = dshape
self._schema = schema
def attributes(self):
with h5py.File(self.path, 'r') as f:
arr = f[self.datapath]
result = dict((attr, getattr(arr, attr))
for attr in h5py_attributes)
return result
def _get_dynd(self, key):
if (isinstance(key, tuple) and
len(key) > len(self.dshape.shape) and
isinstance(self.dshape[-1], datashape.Record)):
rec_key = get(key[-1], self.dshape[-1].names)
if isinstance(rec_key, tuple):
key = rec_key + key[:-1]
else:
key = (rec_key,) + key[:-1]
with h5py.File(self.path, mode='r') as f:
arr = f[self.datapath]
result = np.asarray(arr.__getitem__(key))
return nd.asarray(result, access='readonly')
def _get_py(self, key):
if (isinstance(key, tuple) and
len(key) > len(self.dshape.shape) and
isinstance(self.dshape[-1], datashape.Record)):
rec_key = get(key[-1], self.dshape[-1].names)
if isinstance(rec_key, tuple):
key = rec_key + key[:-1]
else:
key = (rec_key,) + key[:-1]
with h5py.File(self.path, mode='r') as f:
arr = f[self.datapath]
result = np.asarray(arr.__getitem__(key))
return result.tolist()
def __setitem__(self, key, value):
with h5py.File(self.path) as f:
arr = f[self.datapath]
arr[key] = value
return self
def _chunks(self, blen=None):
with h5py.File(self.path, mode='r') as f:
arr = f[self.datapath]
if not blen and arr.chunks:
blen = arr.chunks[0] * 4
blen = blen or 1024
for i in range(0, arr.shape[0], blen):
yield np.array(arr[i:i+blen])
def __iter__(self):
return pipe(self.chunks(), map(partial(nd.as_py, tuple=True)), concat)
def as_dynd(self):
return self.dynd[:]
def _extend_chunks(self, chunks):
with h5py.File(self.path, mode='a') as f:
dset = f[self.datapath]
dtype = dset.dtype
shape = dset.shape
for chunk in chunks:
arr = nd.as_numpy(chunk, allow_copy=True)
shape = list(dset.shape)
shape[0] += len(arr)
dset.resize(shape)
dset[-len(arr):] = arr
def _extend(self, seq):
chunks = partition_all(100, seq)
with h5py.File(self.path, mode='a') as f:
dset = f[self.datapath]
dtype = dset.dtype
shape = dset.shape
for chunk in chunks:
arr = np.asarray(list(chunk), dtype=dtype)
shape = list(dset.shape)
shape[0] += len(arr)
dset.resize(shape)
dset[-len(arr):] = arr
@dispatch(HDF5)
def drop(h):
with h5py.File(h.path) as f:
del f[h.datapath]
@resource.register('.+\.hdf5')
def resource_hdf5(uri, datapath, *args, **kwargs):
return HDF5(uri, datapath, *args, **kwargs)
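# Illustrative usage sketch (not part of the original module); the file name,
# the schema string, and the base-class ``extend`` wrapper around ``_extend``
# are assumptions for demonstration only:
#
#     dd = HDF5('accounts.hdf5', '/data', schema='{id: int32, amount: float64}')
#     dd.extend([(1, 100.0), (2, 200.0)])   # append rows, resizing the dataset
#     list(dd)                              # stream rows back out via _chunks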
| {
"repo_name": "vitan/blaze",
"path": "blaze/data/hdf5.py",
"copies": "1",
"size": "6096",
"license": "bsd-3-clause",
"hash": 3408942459743557600,
"line_mean": 31.0842105263,
"line_max": 78,
"alpha_frac": 0.5508530184,
"autogenerated": false,
"ratio": 3.6634615384615383,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9710017917693445,
"avg_score": 0.0008593278336183987,
"num_lines": 190
} |
from __future__ import absolute_import, division, print_function
import numpy as np
from itertools import chain
import h5py
from dynd import nd
import datashape
from datashape import var
from ..dispatch import dispatch
from .core import DataDescriptor
from ..utils import partition_all, get
from ..compatibility import _strtypes
h5py_attributes = ['chunks', 'compression', 'compression_opts', 'dtype',
'fillvalue', 'fletcher32', 'maxshape', 'shape']
__all__ = ['HDF5', 'discover']
@dispatch(h5py.Dataset)
def discover(d):
return datashape.from_numpy(d.shape, d.dtype)
class HDF5(DataDescriptor):
"""
A Blaze data descriptor which exposes an HDF5 file.
Parameters
----------
path: string
Location of hdf5 file on disk
datapath: string
Location of array dataset in hdf5
    mode: string
        'r', 'r+', 'w', or 'a', as accepted by h5py.File
dshape: string or Datashape
a datashape describing the data
schema: string or DataShape
datashape describing one row of data
**kwargs:
Options to send to h5py - see h5py.File.create_dataset for options
"""
immutable = False
deferred = False
persistent = True
appendable = True
remote = False
def __init__(self, path, datapath, mode='r',
schema=None, dshape=None, **kwargs):
self.path = path
self.datapath = datapath
self.mode = mode
if isinstance(schema, _strtypes):
schema = datashape.dshape(schema)
if isinstance(dshape, _strtypes):
dshape = datashape.dshape(dshape)
if schema and not dshape:
dshape = var * datashape.dshape(schema)
# TODO: provide sane defaults for kwargs
# Notably chunks and maxshape
if dshape:
dshape = datashape.dshape(dshape)
shape = dshape.shape
dtype = dshape[-1].to_numpy_dtype()
if shape[0] == datashape.Var():
kwargs['chunks'] = True
kwargs['maxshape'] = kwargs.get('maxshape', (None,) + shape[1:])
shape = (0,) + tuple(map(int, shape[1:]))
with h5py.File(path, mode) as f:
dset = f.get(datapath)
if dset:
file_dshape = discover(dset)
if dshape and file_dshape != dshape:
raise TypeError("Inconsistent dshapes given:\n"
"\tGiven: %s\n"
"\tFound: %s\n" % (dshape, file_dshape))
else:
dshape = file_dshape
if not dset:
f.create_dataset(datapath, shape, dtype=dtype, **kwargs)
attributes = self.attributes()
if attributes['chunks']:
dshape = var * dshape.subshape[0]
self._dshape = dshape
self._schema = schema
def attributes(self):
with h5py.File(self.path, 'r') as f:
arr = f[self.datapath]
result = dict((attr, getattr(arr, attr))
for attr in h5py_attributes)
return result
def _get_dynd(self, key):
if (isinstance(key, tuple) and
len(key) > len(self.dshape.shape) and
isinstance(self.dshape[-1], datashape.Record)):
rec_key = get(key[-1], self.dshape[-1].names)
if isinstance(rec_key, tuple):
rec_key = list(rec_key)
key = (rec_key,) + key[:-1]
with h5py.File(self.path, mode='r') as f:
arr = f[self.datapath]
result = np.asarray(arr.__getitem__(key))
return nd.asarray(result, access='readonly')
def __setitem__(self, key, value):
with h5py.File(self.path, mode=self.mode) as f:
arr = f[self.datapath]
arr[key] = value
return self
def _chunks(self, blen=100):
with h5py.File(self.path, mode='r') as f:
arr = f[self.datapath]
for i in range(0, arr.shape[0], blen):
yield np.array(arr[i:i+blen])
def as_dynd(self):
return self.dynd[:]
def _extend_chunks(self, chunks):
if 'w' not in self.mode and 'a' not in self.mode:
raise ValueError('Read only')
with h5py.File(self.path, mode=self.mode) as f:
dset = f[self.datapath]
dtype = dset.dtype
shape = dset.shape
for chunk in chunks:
arr = nd.as_numpy(chunk, allow_copy=True)
shape = list(dset.shape)
shape[0] += len(arr)
dset.resize(shape)
dset[-len(arr):] = arr
def _extend(self, seq):
self.extend_chunks(partition_all(100, seq))
def _iter(self):
return chain.from_iterable(self.chunks())
| {
"repo_name": "aterrel/blaze",
"path": "blaze/data/hdf5.py",
"copies": "1",
"size": "4820",
"license": "bsd-3-clause",
"hash": -8023342799787459000,
"line_mean": 31.1333333333,
"line_max": 80,
"alpha_frac": 0.5510373444,
"autogenerated": false,
"ratio": 3.786331500392773,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9836401766603473,
"avg_score": 0.0001934156378600823,
"num_lines": 150
} |
from __future__ import absolute_import, division, print_function
import numpy as np
from itertools import chain
import h5py
from dynd import nd
import datashape
from .core import DataDescriptor
from ..utils import partition_all
h5py_attributes = ['chunks', 'compression', 'compression_opts', 'dtype',
'fillvalue', 'fletcher32', 'maxshape', 'shape']
__all__ = ['HDF5']
class HDF5(DataDescriptor):
"""
A Blaze data descriptor which exposes an HDF5 file.
Parameters
----------
path: string
Location of hdf5 file on disk
datapath: string
Location of array dataset in hdf5
    mode: string
        'r', 'r+', 'w', or 'a', as accepted by h5py.File
dshape: string or Datashape
a datashape describing the data
schema: string or DataShape
datashape describing one row of data
**kwargs:
Options to send to h5py - see h5py.File.create_dataset for options
"""
immutable = False
deferred = False
persistent = True
appendable = True
remote = False
def __init__(self, path, datapath, mode='r', schema=None, dshape=None, **kwargs):
self.path = path
self.datapath = datapath
self.mode = mode
if schema and not dshape:
dshape = 'var * ' + str(schema)
# TODO: provide sane defaults for kwargs
# Notably chunks and maxshape
if dshape:
dshape = datashape.dshape(dshape)
shape = dshape.shape
dtype = datashape.to_numpy_dtype(dshape[-1])
if shape[0] == datashape.Var():
kwargs['chunks'] = True
kwargs['maxshape'] = kwargs.get('maxshape', (None,) + shape[1:])
shape = (0,) + tuple(map(int, shape[1:]))
with h5py.File(path, mode) as f:
dset = f.get(datapath)
if dset is None:
if dshape is None:
raise ValueError('No dataset or dshape provided')
else:
f.create_dataset(datapath, shape, dtype=dtype, **kwargs)
else:
dshape2 = datashape.from_numpy(dset.shape, dset.dtype)
dshape = dshape2
# TODO: test provided dshape against given dshape
# if dshape and dshape != dshape2:
# raise ValueError('Inconsistent datashapes.'
# '\nGiven: %s\nFound: %s' % (dshape, dshape2))
attributes = self.attributes()
if attributes['chunks']:
# is there a better way to do this?
words = str(dshape).split(' * ')
dshape = 'var * ' + ' * '.join(words[1:])
dshape = datashape.dshape(dshape)
self._dshape = dshape
self._schema = schema
def attributes(self):
with h5py.File(self.path, 'r') as f:
arr = f[self.datapath]
result = dict((attr, getattr(arr, attr))
for attr in h5py_attributes)
return result
def __getitem__(self, key):
with h5py.File(self.path, mode='r') as f:
arr = f[self.datapath]
result = np.asarray(arr[key])
return nd.asarray(result, access='readonly')
def __setitem__(self, key, value):
with h5py.File(self.path, mode=self.mode) as f:
arr = f[self.datapath]
arr[key] = value
return self
def _chunks(self, blen=100):
with h5py.File(self.path, mode='r') as f:
arr = f[self.datapath]
for i in range(0, arr.shape[0], blen):
yield np.array(arr[i:i+blen])
def as_dynd(self):
return self[:]
def _extend_chunks(self, chunks):
if 'w' not in self.mode and 'a' not in self.mode:
raise ValueError('Read only')
with h5py.File(self.path, mode=self.mode) as f:
dset = f[self.datapath]
dtype = dset.dtype
shape = dset.shape
for chunk in chunks:
arr = np.array(chunk, dtype=dtype)
shape = list(dset.shape)
shape[0] += len(arr)
dset.resize(shape)
dset[-len(arr):] = arr
def _extend(self, seq):
self.extend_chunks(partition_all(100, seq))
def _iter(self):
return chain.from_iterable(self.chunks())
| {
"repo_name": "sethkontny/blaze",
"path": "blaze/data/hdf5.py",
"copies": "1",
"size": "4332",
"license": "bsd-3-clause",
"hash": -6932624940386869000,
"line_mean": 31.5714285714,
"line_max": 85,
"alpha_frac": 0.5466297322,
"autogenerated": false,
"ratio": 3.8336283185840707,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9877354602236985,
"avg_score": 0.0005806897094172811,
"num_lines": 133
} |
from __future__ import absolute_import, division, print_function
import numpy as np
from itertools import product
from .core import normalize_chunks, Array, names
def doc_wraps(func):
""" Copy docstring from one function to another """
def _(func2):
func2.__doc__ = func.__doc__.replace('>>>', '>>').replace('...', '..')
return func2
return _
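# Illustrative note (not part of the original module): rewriting '>>>' / '...'
# keeps the copied NumPy examples readable in the docstring while preventing
# doctest from collecting and executing them against the dask wrappers.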
class RandomState(object):
"""
Mersenne Twister pseudo-random number generator
    This object contains state to deterministically generate pseudo-random
    numbers from a variety of probability distributions. It is identical to
``np.random.RandomState`` except that all functions also take a ``chunks=``
keyword argument.
Examples
--------
>>> import dask.array as da
>>> state = da.random.RandomState(1234) # a seed
>>> x = state.normal(10, 0.1, size=3, chunks=(2,))
>>> x.compute()
array([ 9.95487579, 10.02999135, 10.08498441])
See Also:
np.random.RandomState
"""
def __init__(self, seed=None):
self._numpy_state = np.random.RandomState(seed)
def seed(self, seed=None):
self._numpy_state.seed(seed)
def _wrap(self, func, *args, **kwargs):
size = kwargs.pop('size')
chunks = kwargs.pop('chunks')
if not isinstance(size, (tuple, list)):
size = (size,)
chunks = normalize_chunks(chunks, size)
name = next(names)
# Get dtype
kw = kwargs.copy()
kw['size'] = (0,)
dtype = func(np.random.RandomState(), *args, **kw).dtype
# Build graph
keys = product([name], *[range(len(bd)) for bd in chunks])
sizes = product(*chunks)
vals = ((_apply_random,
func.__name__,
self._numpy_state.randint(np.iinfo(np.int32).max),
size, args, kwargs)
for size in sizes)
dsk = dict(zip(keys, vals))
return Array(dsk, name, chunks, dtype=dtype)
@doc_wraps(np.random.RandomState.beta)
def beta(self, a, b, size=None, chunks=None):
return self._wrap(np.random.RandomState.beta, a, b,
size=size, chunks=chunks)
@doc_wraps(np.random.RandomState.binomial)
def binomial(self, n, p, size=None, chunks=None):
return self._wrap(np.random.RandomState.binomial, n, p,
size=size, chunks=chunks)
@doc_wraps(np.random.RandomState.chisquare)
def chisquare(self, df, size=None, chunks=None):
return self._wrap(np.random.RandomState.chisquare, df,
size=size, chunks=chunks)
@doc_wraps(np.random.RandomState.choice)
def choice(self, a, size=None, replace=True, p=None, chunks=None):
return self._wrap(np.random.RandomState.choice, a,
                          size=size, replace=replace, p=p, chunks=chunks)
# @doc_wraps(np.random.RandomState.dirichlet)
# def dirichlet(self, alpha, size=None, chunks=None):
@doc_wraps(np.random.RandomState.exponential)
def exponential(self, scale=1.0, size=None, chunks=None):
return self._wrap(np.random.RandomState.exponential, scale,
size=size, chunks=chunks)
@doc_wraps(np.random.RandomState.f)
def f(self, dfnum, dfden, size=None, chunks=None):
return self._wrap(np.random.RandomState.f, dfnum, dfden,
size=size, chunks=chunks)
@doc_wraps(np.random.RandomState.gamma)
    def gamma(self, shape, scale=1.0, size=None, chunks=None):
        return self._wrap(np.random.RandomState.gamma, shape, scale,
                          size=size, chunks=chunks)
@doc_wraps(np.random.RandomState.geometric)
def geometric(self, p, size=None, chunks=None):
return self._wrap(np.random.RandomState.geometric, p,
size=size, chunks=chunks)
@doc_wraps(np.random.RandomState.gumbel)
def gumbel(self, loc=0.0, scale=1.0, size=None, chunks=None):
return self._wrap(np.random.RandomState.gumbel, loc, scale,
size=size, chunks=chunks)
@doc_wraps(np.random.RandomState.hypergeometric)
def hypergeometric(self, ngood, nbad, nsample, size=None, chunks=None):
return self._wrap(np.random.RandomState.hypergeometric,
ngood, nbad, nsample,
size=size, chunks=chunks)
@doc_wraps(np.random.RandomState.laplace)
def laplace(self, loc=0.0, scale=1.0, size=None, chunks=None):
return self._wrap(np.random.RandomState.laplace, loc, scale,
size=size, chunks=chunks)
@doc_wraps(np.random.RandomState.logistic)
def logistic(self, loc=0.0, scale=1.0, size=None, chunks=None):
return self._wrap(np.random.RandomState.logistic, loc, scale,
size=size, chunks=chunks)
@doc_wraps(np.random.RandomState.lognormal)
def lognormal(self, mean=0.0, sigma=1.0, size=None, chunks=None):
return self._wrap(np.random.RandomState.lognormal, mean, sigma,
size=size, chunks=chunks)
@doc_wraps(np.random.RandomState.logseries)
def logseries(self, p, size=None, chunks=None):
return self._wrap(np.random.RandomState.logseries, p,
size=size, chunks=chunks)
# multinomial
@doc_wraps(np.random.RandomState.negative_binomial)
def negative_binomial(self, n, p, size=None, chunks=None):
return self._wrap(np.random.RandomState.negative_binomial, n, p,
size=size, chunks=chunks)
@doc_wraps(np.random.RandomState.noncentral_chisquare)
def noncentral_chisquare(self, df, nonc, size=None, chunks=None):
return self._wrap(np.random.RandomState.noncentral_chisquare, df, nonc,
size=size, chunks=chunks)
@doc_wraps(np.random.RandomState.noncentral_f)
def noncentral_f(self, dfnum, dfden, nonc, size=None, chunks=None):
return self._wrap(np.random.RandomState.noncentral_f,
dfnum, dfden, nonc,
size=size, chunks=chunks)
@doc_wraps(np.random.RandomState.normal)
def normal(self, loc=0.0, scale=1.0, size=None, chunks=None):
return self._wrap(np.random.RandomState.normal, loc, scale,
size=size, chunks=chunks)
@doc_wraps(np.random.RandomState.pareto)
def pareto(self, a, size=None, chunks=None):
return self._wrap(np.random.RandomState.pareto, a,
size=size, chunks=chunks)
@doc_wraps(np.random.RandomState.poisson)
def poisson(self, lam=1.0, size=None, chunks=None):
return self._wrap(np.random.RandomState.poisson, lam,
size=size, chunks=chunks)
@doc_wraps(np.random.RandomState.power)
def power(self, a, size=None, chunks=None):
return self._wrap(np.random.RandomState.power, a,
size=size, chunks=chunks)
@doc_wraps(np.random.RandomState.randint)
def randint(self, low, high=None, size=None, chunks=None):
return self._wrap(np.random.RandomState.randint, low, high,
size=size, chunks=chunks)
@doc_wraps(np.random.RandomState.random_integers)
def random_integers(self, low, high=None, size=None, chunks=None):
return self._wrap(np.random.RandomState.random_integers, low, high,
size=size, chunks=chunks)
@doc_wraps(np.random.RandomState.random_sample)
def random_sample(self, size=None, chunks=None):
return self._wrap(np.random.RandomState.random_sample,
size=size, chunks=chunks)
random = random_sample
@doc_wraps(np.random.RandomState.rayleigh)
def rayleigh(self, scale=1.0, size=None, chunks=None):
return self._wrap(np.random.RandomState.rayleigh, scale,
size=size, chunks=chunks)
@doc_wraps(np.random.RandomState.standard_cauchy)
def standard_cauchy(self, size=None, chunks=None):
return self._wrap(np.random.RandomState.standard_cauchy,
size=size, chunks=chunks)
@doc_wraps(np.random.RandomState.standard_exponential)
def standard_exponential(self, size=None, chunks=None):
return self._wrap(np.random.RandomState.standard_exponential,
size=size, chunks=chunks)
@doc_wraps(np.random.RandomState.standard_gamma)
def standard_gamma(self, shape, size=None, chunks=None):
return self._wrap(np.random.RandomState.standard_gamma, shape,
size=size, chunks=chunks)
@doc_wraps(np.random.RandomState.standard_normal)
def standard_normal(self, size=None, chunks=None):
return self._wrap(np.random.RandomState.standard_normal,
size=size, chunks=chunks)
@doc_wraps(np.random.RandomState.standard_t)
def standard_t(self, df, size=None, chunks=None):
return self._wrap(np.random.RandomState.standard_t, df,
size=size, chunks=chunks)
@doc_wraps(np.random.RandomState.tomaxint)
def tomaxint(self, size=None, chunks=None):
return self._wrap(np.random.RandomState.tomaxint,
size=size, chunks=chunks)
@doc_wraps(np.random.RandomState.triangular)
def triangular(self, left, mode, right, size=None, chunks=None):
return self._wrap(np.random.RandomState.triangular, left, mode, right,
size=size, chunks=chunks)
@doc_wraps(np.random.RandomState.uniform)
def uniform(self, low=0.0, high=1.0, size=None, chunks=None):
return self._wrap(np.random.RandomState.uniform, low, high,
size=size, chunks=chunks)
@doc_wraps(np.random.RandomState.vonmises)
def vonmises(self, mu, kappa, size=None, chunks=None):
return self._wrap(np.random.RandomState.vonmises, mu, kappa,
size=size, chunks=chunks)
@doc_wraps(np.random.RandomState.wald)
def wald(self, mean, scale, size=None, chunks=None):
return self._wrap(np.random.RandomState.wald, mean, scale,
size=size, chunks=chunks)
@doc_wraps(np.random.RandomState.weibull)
def weibull(self, a, size=None, chunks=None):
return self._wrap(np.random.RandomState.weibull, a,
size=size, chunks=chunks)
@doc_wraps(np.random.RandomState.zipf)
def zipf(self, a, size=None, chunks=None):
return self._wrap(np.random.RandomState.zipf, a,
size=size, chunks=chunks)
def _apply_random(func, seed, size, args, kwargs):
""" Apply RandomState method with seed
>>> _apply_random('normal', 123, 3, (10, 1.0), {})
array([ 8.9143694 , 10.99734545, 10.2829785 ])
"""
state = np.random.RandomState(seed)
func = getattr(state, func)
return func(*args, size=size, **kwargs)
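# Illustrative sketch (not part of the original module): ``RandomState._wrap``
# above draws one int32 seed per output chunk, and every graph task rebuilds
# its block from that seed alone through ``_apply_random``.  The hypothetical
# helper below shows that this per-chunk scheme is reproducible.
def _per_chunk_normal(seeds, block_size):
    """ Rebuild one block per seed, exactly as the graph tasks do
    >>> a = _per_chunk_normal([0, 1], 3)
    >>> b = _per_chunk_normal([0, 1], 3)
    >>> np.allclose(np.concatenate(a), np.concatenate(b))
    True
    """
    return [_apply_random('normal', seed, block_size, (), {}) for seed in seeds]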
_state = RandomState()
seed = _state.seed
beta = _state.beta
binomial = _state.binomial
chisquare = _state.chisquare
exponential = _state.exponential
f = _state.f
gamma = _state.gamma
geometric = _state.geometric
gumbel = _state.gumbel
hypergeometric = _state.hypergeometric
laplace = _state.laplace
logistic = _state.logistic
lognormal = _state.lognormal
logseries = _state.logseries
negative_binomial = _state.negative_binomial
noncentral_chisquare = _state.noncentral_chisquare
noncentral_f = _state.noncentral_f
normal = _state.normal
pareto = _state.pareto
poisson = _state.poisson
power = _state.power
rayleigh = _state.rayleigh
random_sample = _state.random_sample
random = random_sample
triangular = _state.triangular
uniform = _state.uniform
vonmises = _state.vonmises
wald = _state.wald
weibull = _state.weibull
zipf = _state.zipf
"""
Standard distributions
"""
standard_cauchy = _state.standard_cauchy
standard_exponential = _state.standard_exponential
standard_gamma = _state.standard_gamma
standard_normal = _state.standard_normal
standard_t = _state.standard_t
| {
"repo_name": "simudream/dask",
"path": "dask/array/random.py",
"copies": "5",
"size": "12030",
"license": "bsd-3-clause",
"hash": -3960516644082094000,
"line_mean": 36.7115987461,
"line_max": 79,
"alpha_frac": 0.6323358271,
"autogenerated": false,
"ratio": 3.5931899641577063,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6725525791257706,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import numpy as np
from itertools import product
from .core import normalize_chunks, Array
from ..base import tokenize
def doc_wraps(func):
""" Copy docstring from one function to another """
def _(func2):
func2.__doc__ = func.__doc__.replace('>>>', '>>').replace('...', '..')
return func2
return _
class RandomState(object):
"""
Mersenne Twister pseudo-random number generator
    This object contains state to deterministically generate pseudo-random
    numbers from a variety of probability distributions. It is identical to
``np.random.RandomState`` except that all functions also take a ``chunks=``
keyword argument.
Examples
--------
>>> import dask.array as da
>>> state = da.random.RandomState(1234) # a seed
>>> x = state.normal(10, 0.1, size=3, chunks=(2,))
>>> x.compute()
array([ 9.95487579, 10.02999135, 10.08498441])
See Also:
np.random.RandomState
"""
def __init__(self, seed=None):
self._numpy_state = np.random.RandomState(seed)
def seed(self, seed=None):
self._numpy_state.seed(seed)
def _wrap(self, func, *args, **kwargs):
size = kwargs.pop('size')
chunks = kwargs.pop('chunks')
if not isinstance(size, (tuple, list)):
size = (size,)
chunks = normalize_chunks(chunks, size)
# Get dtype
kw = kwargs.copy()
kw['size'] = (0,)
dtype = func(np.random.RandomState(), *args, **kw).dtype
# Build graph
sizes = list(product(*chunks))
seeds = [self._numpy_state.randint(np.iinfo(np.int32).max) for i in sizes]
token = tokenize(seeds, size, chunks, args, kwargs)
name = 'da.random.{0}-{1}'.format(func.__name__, token)
keys = product([name], *[range(len(bd)) for bd in chunks])
vals = ((_apply_random, func.__name__, seed, size, args, kwargs)
for seed, size in zip(seeds, sizes))
dsk = dict(zip(keys, vals))
return Array(dsk, name, chunks, dtype=dtype)
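    # Illustrative note (not part of the original module): the array name is
    # derived from ``tokenize(seeds, size, chunks, args, kwargs)`` rather than
    # a global counter, so identical seeds and arguments deterministically
    # yield the same graph keys.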
@doc_wraps(np.random.RandomState.beta)
def beta(self, a, b, size=None, chunks=None):
return self._wrap(np.random.RandomState.beta, a, b,
size=size, chunks=chunks)
@doc_wraps(np.random.RandomState.binomial)
def binomial(self, n, p, size=None, chunks=None):
return self._wrap(np.random.RandomState.binomial, n, p,
size=size, chunks=chunks)
@doc_wraps(np.random.RandomState.chisquare)
def chisquare(self, df, size=None, chunks=None):
return self._wrap(np.random.RandomState.chisquare, df,
size=size, chunks=chunks)
@doc_wraps(np.random.RandomState.choice)
def choice(self, a, size=None, replace=True, p=None, chunks=None):
return self._wrap(np.random.RandomState.choice, a,
                          size=size, replace=replace, p=p, chunks=chunks)
# @doc_wraps(np.random.RandomState.dirichlet)
# def dirichlet(self, alpha, size=None, chunks=None):
@doc_wraps(np.random.RandomState.exponential)
def exponential(self, scale=1.0, size=None, chunks=None):
return self._wrap(np.random.RandomState.exponential, scale,
size=size, chunks=chunks)
@doc_wraps(np.random.RandomState.f)
def f(self, dfnum, dfden, size=None, chunks=None):
return self._wrap(np.random.RandomState.f, dfnum, dfden,
size=size, chunks=chunks)
@doc_wraps(np.random.RandomState.gamma)
    def gamma(self, shape, scale=1.0, size=None, chunks=None):
        return self._wrap(np.random.RandomState.gamma, shape, scale,
                          size=size, chunks=chunks)
@doc_wraps(np.random.RandomState.geometric)
def geometric(self, p, size=None, chunks=None):
return self._wrap(np.random.RandomState.geometric, p,
size=size, chunks=chunks)
@doc_wraps(np.random.RandomState.gumbel)
def gumbel(self, loc=0.0, scale=1.0, size=None, chunks=None):
return self._wrap(np.random.RandomState.gumbel, loc, scale,
size=size, chunks=chunks)
@doc_wraps(np.random.RandomState.hypergeometric)
def hypergeometric(self, ngood, nbad, nsample, size=None, chunks=None):
return self._wrap(np.random.RandomState.hypergeometric,
ngood, nbad, nsample,
size=size, chunks=chunks)
@doc_wraps(np.random.RandomState.laplace)
def laplace(self, loc=0.0, scale=1.0, size=None, chunks=None):
return self._wrap(np.random.RandomState.laplace, loc, scale,
size=size, chunks=chunks)
@doc_wraps(np.random.RandomState.logistic)
def logistic(self, loc=0.0, scale=1.0, size=None, chunks=None):
return self._wrap(np.random.RandomState.logistic, loc, scale,
size=size, chunks=chunks)
@doc_wraps(np.random.RandomState.lognormal)
def lognormal(self, mean=0.0, sigma=1.0, size=None, chunks=None):
return self._wrap(np.random.RandomState.lognormal, mean, sigma,
size=size, chunks=chunks)
@doc_wraps(np.random.RandomState.logseries)
def logseries(self, p, size=None, chunks=None):
return self._wrap(np.random.RandomState.logseries, p,
size=size, chunks=chunks)
# multinomial
@doc_wraps(np.random.RandomState.negative_binomial)
def negative_binomial(self, n, p, size=None, chunks=None):
return self._wrap(np.random.RandomState.negative_binomial, n, p,
size=size, chunks=chunks)
@doc_wraps(np.random.RandomState.noncentral_chisquare)
def noncentral_chisquare(self, df, nonc, size=None, chunks=None):
return self._wrap(np.random.RandomState.noncentral_chisquare, df, nonc,
size=size, chunks=chunks)
@doc_wraps(np.random.RandomState.noncentral_f)
def noncentral_f(self, dfnum, dfden, nonc, size=None, chunks=None):
return self._wrap(np.random.RandomState.noncentral_f,
dfnum, dfden, nonc,
size=size, chunks=chunks)
@doc_wraps(np.random.RandomState.normal)
def normal(self, loc=0.0, scale=1.0, size=None, chunks=None):
return self._wrap(np.random.RandomState.normal, loc, scale,
size=size, chunks=chunks)
@doc_wraps(np.random.RandomState.pareto)
def pareto(self, a, size=None, chunks=None):
return self._wrap(np.random.RandomState.pareto, a,
size=size, chunks=chunks)
@doc_wraps(np.random.RandomState.poisson)
def poisson(self, lam=1.0, size=None, chunks=None):
return self._wrap(np.random.RandomState.poisson, lam,
size=size, chunks=chunks)
@doc_wraps(np.random.RandomState.power)
def power(self, a, size=None, chunks=None):
return self._wrap(np.random.RandomState.power, a,
size=size, chunks=chunks)
@doc_wraps(np.random.RandomState.randint)
def randint(self, low, high=None, size=None, chunks=None):
return self._wrap(np.random.RandomState.randint, low, high,
size=size, chunks=chunks)
@doc_wraps(np.random.RandomState.random_integers)
def random_integers(self, low, high=None, size=None, chunks=None):
return self._wrap(np.random.RandomState.random_integers, low, high,
size=size, chunks=chunks)
@doc_wraps(np.random.RandomState.random_sample)
def random_sample(self, size=None, chunks=None):
return self._wrap(np.random.RandomState.random_sample,
size=size, chunks=chunks)
random = random_sample
@doc_wraps(np.random.RandomState.rayleigh)
def rayleigh(self, scale=1.0, size=None, chunks=None):
return self._wrap(np.random.RandomState.rayleigh, scale,
size=size, chunks=chunks)
@doc_wraps(np.random.RandomState.standard_cauchy)
def standard_cauchy(self, size=None, chunks=None):
return self._wrap(np.random.RandomState.standard_cauchy,
size=size, chunks=chunks)
@doc_wraps(np.random.RandomState.standard_exponential)
def standard_exponential(self, size=None, chunks=None):
return self._wrap(np.random.RandomState.standard_exponential,
size=size, chunks=chunks)
@doc_wraps(np.random.RandomState.standard_gamma)
def standard_gamma(self, shape, size=None, chunks=None):
return self._wrap(np.random.RandomState.standard_gamma, shape,
size=size, chunks=chunks)
@doc_wraps(np.random.RandomState.standard_normal)
def standard_normal(self, size=None, chunks=None):
return self._wrap(np.random.RandomState.standard_normal,
size=size, chunks=chunks)
@doc_wraps(np.random.RandomState.standard_t)
def standard_t(self, df, size=None, chunks=None):
return self._wrap(np.random.RandomState.standard_t, df,
size=size, chunks=chunks)
@doc_wraps(np.random.RandomState.tomaxint)
def tomaxint(self, size=None, chunks=None):
return self._wrap(np.random.RandomState.tomaxint,
size=size, chunks=chunks)
@doc_wraps(np.random.RandomState.triangular)
def triangular(self, left, mode, right, size=None, chunks=None):
return self._wrap(np.random.RandomState.triangular, left, mode, right,
size=size, chunks=chunks)
@doc_wraps(np.random.RandomState.uniform)
def uniform(self, low=0.0, high=1.0, size=None, chunks=None):
return self._wrap(np.random.RandomState.uniform, low, high,
size=size, chunks=chunks)
@doc_wraps(np.random.RandomState.vonmises)
def vonmises(self, mu, kappa, size=None, chunks=None):
return self._wrap(np.random.RandomState.vonmises, mu, kappa,
size=size, chunks=chunks)
@doc_wraps(np.random.RandomState.wald)
def wald(self, mean, scale, size=None, chunks=None):
return self._wrap(np.random.RandomState.wald, mean, scale,
size=size, chunks=chunks)
@doc_wraps(np.random.RandomState.weibull)
def weibull(self, a, size=None, chunks=None):
return self._wrap(np.random.RandomState.weibull, a,
size=size, chunks=chunks)
@doc_wraps(np.random.RandomState.zipf)
def zipf(self, a, size=None, chunks=None):
return self._wrap(np.random.RandomState.zipf, a,
size=size, chunks=chunks)
def _apply_random(func, seed, size, args, kwargs):
""" Apply RandomState method with seed
>>> _apply_random('normal', 123, 3, (10, 1.0), {})
array([ 8.9143694 , 10.99734545, 10.2829785 ])
"""
state = np.random.RandomState(seed)
func = getattr(state, func)
return func(*args, size=size, **kwargs)
_state = RandomState()
seed = _state.seed
beta = _state.beta
binomial = _state.binomial
chisquare = _state.chisquare
exponential = _state.exponential
f = _state.f
gamma = _state.gamma
geometric = _state.geometric
gumbel = _state.gumbel
hypergeometric = _state.hypergeometric
laplace = _state.laplace
logistic = _state.logistic
lognormal = _state.lognormal
logseries = _state.logseries
negative_binomial = _state.negative_binomial
noncentral_chisquare = _state.noncentral_chisquare
noncentral_f = _state.noncentral_f
normal = _state.normal
pareto = _state.pareto
poisson = _state.poisson
power = _state.power
rayleigh = _state.rayleigh
random_sample = _state.random_sample
random = random_sample
triangular = _state.triangular
uniform = _state.uniform
vonmises = _state.vonmises
wald = _state.wald
weibull = _state.weibull
zipf = _state.zipf
"""
Standard distributions
"""
standard_cauchy = _state.standard_cauchy
standard_exponential = _state.standard_exponential
standard_gamma = _state.standard_gamma
standard_normal = _state.standard_normal
standard_t = _state.standard_t
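# Illustrative usage (editor's note, not part of the original module): the
# module-level functions above mirror the numpy.random API but accept a
# `chunks=` argument and build dask Arrays lazily. Assuming this module is
# importable as dask.array.random:
#
#     import dask.array as da
#     x = da.random.normal(10, 0.1, size=(10000, 10000), chunks=(1000, 1000))
#     x.mean().compute()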
| {
"repo_name": "PhE/dask",
"path": "dask/array/random.py",
"copies": "4",
"size": "12156",
"license": "bsd-3-clause",
"hash": 916484420527660000,
"line_mean": 37.1065830721,
"line_max": 82,
"alpha_frac": 0.6349128003,
"autogenerated": false,
"ratio": 3.5773984696880516,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6212311269988052,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import numpy as np
from matplotlib.colors import ColorConverter
from glue import config
from qtpy import QtCore, QtWidgets, QtGui
from glue.external.echo import add_callback
from glue.utils import nonpartial
from glue.utils.qt.widget_properties import WidgetProperty
from matplotlib import cm
__all__ = ['mpl_to_qt4_color', 'qt4_to_mpl_color', 'cmap2pixmap',
'tint_pixmap', 'QColorBox', 'ColorProperty', 'connect_color',
'QColormapCombo']
def mpl_to_qt4_color(color, alpha=None):
"""
    Convert a matplotlib color string into a Qt QColor object
Parameters
----------
color : str
A color specification that matplotlib understands
alpha : float
Optional opacity. Float in range [0,1]
Returns
-------
qcolor : ``QColor``
A QColor object representing the converted color
"""
if color in [None, 'none', 'None']:
return QtGui.QColor(0, 0, 0, 0)
cc = ColorConverter()
r, g, b, a = cc.to_rgba(color)
if alpha is not None:
a = alpha
return QtGui.QColor(r * 255, g * 255, b * 255, a * 255)
def qt4_to_mpl_color(qcolor):
"""
Convert a QColor object into a string that matplotlib understands
Note: This ignores opacity
Parameters
----------
qcolor : ``QColor``
The Qt color
Returns
-------
color : str
A hex string describing that color
"""
hexid = qcolor.name()
return str(hexid)
def cmap2pixmap(cmap, steps=50, size=(100,100)):
"""
    Convert a matplotlib colormap into a QPixmap
Parameters
----------
cmap : `~matplotlib.colors.Colormap`
The colormap to use
    steps : int
        The number of color steps in the output. Default=50
    size : tuple
        The (width, height) of the output pixmap, in pixels. Default=(100, 100)
Returns
-------
pixmap : ``QPixmap``
The QPixmap instance
"""
sm = cm.ScalarMappable(cmap=cmap)
sm.norm.vmin = 0.0
sm.norm.vmax = 1.0
inds = np.linspace(0, 1, steps)
rgbas = sm.to_rgba(inds)
rgbas = [QtGui.QColor(int(r * 255), int(g * 255),
int(b * 255), int(a * 255)).rgba() for r, g, b, a in rgbas]
im = QtGui.QImage(steps, 1, QtGui.QImage.Format_Indexed8)
im.setColorTable(rgbas)
for i in range(steps):
im.setPixel(i, 0, i)
im = im.scaled(*size)
pm = QtGui.QPixmap.fromImage(im)
return pm
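# Illustrative usage (editor's note, not part of the original module): build a
# combo-box icon from a named matplotlib colormap. A QApplication must exist
# before any QPixmap is created; `cm.jet` is just an example colormap.
#
#     pm = cmap2pixmap(cm.jet, steps=100, size=(128, 16))
#     icon = QtGui.QIcon(pm)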
def tint_pixmap(bm, color):
"""
Re-color a monochrome pixmap object using `color`
Parameters
----------
bm : ``QBitmap``
The Pixmap object
color : ``QColor``
The Qt color
Returns
-------
pixmap : ``QPixmap``
The new pixmap
"""
if bm.depth() != 1:
raise TypeError("Input pixmap must have a depth of 1: %i" % bm.depth())
image = bm.toImage()
image.setColor(1, color.rgba())
image.setColor(0, QtGui.QColor(0, 0, 0, 0).rgba())
result = QtGui.QPixmap.fromImage(image)
return result
class ColorProperty(WidgetProperty):
def getter(self, widget):
return widget.color()
def setter(self, widget, value):
widget.setColor(value)
def connect_color(client, prop, widget):
def update_widget(text):
widget.setColor(text)
def update_prop():
setattr(client, prop, widget.color())
add_callback(client, prop, update_widget)
widget.colorChanged.connect(nonpartial(update_prop))
update_widget(getattr(client, prop))
from glue.external.echo.qt.autoconnect import HANDLERS
HANDLERS['color'] = connect_color
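# Illustrative usage (editor's note, not part of the original module): keep an
# echo callback property in sync with a QColorBox (defined below). The `Style`
# class and its `color` property are hypothetical.
#
#     from glue.external.echo import CallbackProperty
#
#     class Style(object):
#         color = CallbackProperty('#ff0000')
#
#     style = Style()
#     box = QColorBox()
#     connect_color(style, 'color', box)
#     style.color = '#00ff00'   # the box updates; edits in the box write back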
class QColorBox(QtWidgets.QLabel):
mousePressed = QtCore.Signal()
colorChanged = QtCore.Signal()
def __init__(self, *args, **kwargs):
super(QColorBox, self).__init__(*args, **kwargs)
self.mousePressed.connect(nonpartial(self.query_color))
self.colorChanged.connect(nonpartial(self.on_color_change))
self.setColor("#000000")
def mousePressEvent(self, event):
self.mousePressed.emit()
event.accept()
def query_color(self):
color = QtWidgets.QColorDialog.getColor(self._qcolor, parent=self)
if color.isValid():
self.setColor(qt4_to_mpl_color(color))
def setColor(self, color):
self._color = color
self.colorChanged.emit()
def color(self):
return self._color
def on_color_change(self):
self._qcolor = mpl_to_qt4_color(self.color())
image = QtGui.QImage(70, 22, QtGui.QImage.Format_RGB32)
try:
image.fill(self._qcolor)
except TypeError:
# PySide and old versions of PyQt require a RGBA integer
image.fill(self._qcolor.rgba())
pixmap = QtGui.QPixmap.fromImage(image)
self.setPixmap(pixmap)
class QColormapCombo(QtWidgets.QComboBox):
def __init__(self, *args, **kwargs):
super(QColormapCombo, self).__init__(*args, **kwargs)
for label, cmap in config.colormaps:
self.addItem("", userData=cmap)
self._update_icons()
def _update_icons(self):
self.setIconSize(QtCore.QSize(self.width(), 15))
for index in range(self.count()):
cmap = self.itemData(index)
icon = QtGui.QIcon(cmap2pixmap(cmap, size=(self.width(), 15), steps=200))
self.setItemIcon(index, icon)
def resizeEvent(self, *args, **kwargs):
super(QColormapCombo, self).resizeEvent(*args, **kwargs)
self._update_icons()
if __name__ == "__main__":
from glue.utils.qt import get_qapp
app = get_qapp()
label = QColorBox()
label.resize(100,100)
label.show()
label.raise_()
app.exec_()
| {
"repo_name": "stscieisenhamer/glue",
"path": "glue/utils/qt/colors.py",
"copies": "3",
"size": "5669",
"license": "bsd-3-clause",
"hash": -5737210371490865000,
"line_mean": 24.8858447489,
"line_max": 85,
"alpha_frac": 0.6128064914,
"autogenerated": false,
"ratio": 3.6621447028423773,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0004620065154584793,
"num_lines": 219
} |
from __future__ import absolute_import, division, print_function
import numpy as np
from matplotlib.transforms import blended_transform_factory
from glue.core.callback_property import CallbackProperty, add_callback
PICK_THRESH = 30 # pixel distance threshold for picking
class Grip(object):
def __init__(self, viewer, artist=True):
self.viewer = viewer
self.enabled = True
self.artist = None
if artist:
self.artist = self._artist_factory()
def remove(self):
raise NotImplementedError()
def _artist_factory(self):
raise NotImplementedError()
def pick_dist(self, x, y):
"""
Return the distance, in pixels,
between a point in (x,y) data space and
the grip
"""
raise NotImplementedError()
def dblclick(self, x, y):
"""Respond to a double-click event
Default is to ignore
"""
pass
def select(self, x, y):
"""
Process a selection event (click) at x,y
"""
raise NotImplementedError()
def drag(self, x, y):
"""
Process a drag to x, y
"""
raise NotImplementedError()
def release(self):
"""
Process a release
"""
raise NotImplementedError()
def disable(self):
self.enabled = False
if self.artist is not None:
self.artist.set_visible(False)
self.viewer.axes.figure.canvas.draw()
def enable(self):
self.enabled = True
if self.artist is not None:
self.artist.set_visible(True)
self.viewer.axes.figure.canvas.draw()
class ValueGrip(Grip):
value = CallbackProperty(None)
def __init__(self, viewer, artist=True):
super(ValueGrip, self).__init__(viewer, artist)
self._drag = False
def _artist_factory(self):
return ValueArtist(self)
def dblclick(self, x, y):
self.value = x
def pick_dist(self, x, y):
xy = [[x, y], [self.value, y]]
xypix = self.viewer.axes.transData.transform(xy)
return abs(xypix[1, 0] - xypix[0, 0])
def select(self, x, y):
if self.pick_dist(x, y) > PICK_THRESH:
return
self._drag = True
def drag(self, x, y):
if self._drag:
self.value = x
def release(self):
self._drag = False
class RangeGrip(Grip):
range = CallbackProperty((None, None))
def __init__(self, viewer):
super(RangeGrip, self).__init__(viewer)
# track state during drags
self._move = None
self._ref = None
self._refx = None
self._refnew = None
def _artist_factory(self):
return RangeArtist(self)
def pick_dist(self, x, y):
xy = np.array([[x, y],
[self.range[0], y],
[self.range[1], y],
[sum(self.range) / 2, y]])
xypix = self.viewer.axes.transData.transform(xy)
dx = np.abs(xypix[1:] - xypix[0])[:, 0]
return min(dx)
def select(self, x, y):
if self.pick_dist(x, y) > PICK_THRESH:
return self.new_select(x, y)
cen = sum(self.range) / 2.
wid = self.range[1] - self.range[0]
if x < cen - wid / 4.:
self._move = 'left'
elif x < cen + wid / 4.:
self._move = 'center'
self._ref = self.range
self._refx = x
else:
self._move = 'right'
def new_select(self, x, y):
"""
Begin a selection in "new range" mode.
In this mode, the previous grip position is ignored,
and the new range is defined by the select/release positions
"""
self._refnew = x
self.range = (x, x)
def new_drag(self, x, y):
"""
Drag the selection in "new mode"
"""
if self._refnew is not None:
self._set_range(self._refnew, x)
def drag(self, x, y):
if self._refnew is not None:
return self.new_drag(x, y)
if self._move == 'left':
if x > self.range[1]:
self._move = 'right'
self._set_range(x, self.range[1])
elif self._move == 'center':
dx = (x - self._refx)
self._set_range(self._ref[0] + dx, self._ref[1] + dx)
else:
if x < self.range[0]:
self._move = 'left'
self._set_range(self.range[0], x)
def _set_range(self, lo, hi):
self.range = min(lo, hi), max(lo, hi)
def release(self):
self._move = None
self._ref = None
self._refx = None
self._refnew = None
class ValueArtist(object):
def __init__(self, grip, **kwargs):
self.grip = grip
add_callback(grip, 'value', self._update)
ax = self.grip.viewer.axes
kwargs.setdefault('lw', 2)
kwargs.setdefault('alpha', 0.5)
kwargs.setdefault('c', '#ffb304')
trans = blended_transform_factory(ax.transData, ax.transAxes)
self._line, = ax.plot([grip.value, grip.value], [0, 1],
transform=trans, **kwargs)
def _update(self, value):
self._line.set_xdata([value, value])
self._line.axes.figure.canvas.draw()
def set_visible(self, visible):
self._line.set_visible(visible)
class RangeArtist(object):
def __init__(self, grip, **kwargs):
self.grip = grip
add_callback(grip, 'range', self._update)
ax = grip.viewer.axes
trans = blended_transform_factory(ax.transData, ax.transAxes)
kwargs.setdefault('lw', 2)
kwargs.setdefault('alpha', 0.5)
kwargs.setdefault('c', '#ffb304')
self._line, = ax.plot(self.x, self.y, transform=trans, **kwargs)
@property
def x(self):
l, r = self.grip.range
return [l, l, l, r, r, r]
@property
def y(self):
return [0, 1, .5, .5, 0, 1]
def _update(self, rng):
self._line.set_xdata(self.x)
self._line.axes.figure.canvas.draw()
def set_visible(self, visible):
self._line.set_visible(visible)
def _build_axes(figure):
ax2 = figure.add_subplot(122)
ax1 = figure.add_subplot(121, sharex=ax2)
ax1.xaxis.get_major_formatter().set_useOffset(False)
ax1.yaxis.get_major_formatter().set_useOffset(False)
ax2.xaxis.get_major_formatter().set_useOffset(False)
ax2.yaxis.get_major_formatter().set_useOffset(False)
return ax1, ax2
class ProfileViewer(object):
value_cls = ValueGrip
range_cls = RangeGrip
def __init__(self, figure):
self.axes, self.resid_axes = _build_axes(figure)
self._artist = None
self._resid_artist = None
self._x = self._xatt = self._y = self._yatt = None
self._resid = None
self.connect()
self._fit_artists = []
self.active_grip = None # which grip should receive events?
self.grips = []
self._xlabel = ''
def set_xlabel(self, xlabel):
self._xlabel = xlabel
def autoscale_ylim(self):
x, y = self._x, self._y
xlim = self.axes.get_xlim()
mask = (xlim[0] <= x) & (x <= xlim[1])
ymask = y[mask]
if ymask.size == 0:
return
ylim = np.nan_to_num(np.array([np.nanmin(ymask), np.nanmax(ymask)]))
self.axes.set_ylim(ylim[0], ylim[1] + .05 * (ylim[1] - ylim[0]))
if self._resid is None:
return
assert self._resid.size == y.size
ymask = self._resid[mask]
ylim = np.nan_to_num([np.nanmin(ymask), np.nanmax(ymask)])
diff = .05 * (ylim[1] - ylim[0])
self.resid_axes.set_ylim(ylim[0] - diff, ylim[1] + diff)
def _relayout(self):
if self._resid_artist is not None:
self.axes.set_position([0.1, .35, .88, .6])
self.resid_axes.set_position([0.1, .15, .88, .2])
self.resid_axes.set_xlabel(self._xlabel)
self.resid_axes.set_visible(True)
self.axes.set_xlabel('')
[t.set_visible(False) for t in self.axes.get_xticklabels()]
else:
self.resid_axes.set_visible(False)
self.axes.set_position([0.1, .15, .88, .83])
self.axes.set_xlabel(self._xlabel)
[t.set_visible(True) for t in self.axes.get_xticklabels()]
def set_profile(self, x, y, xatt=None, yatt=None, **kwargs):
"""
Set a new line profile
:param x: X-coordinate data
:type x: array-like
:param y: Y-coordinate data
:type y: array-like
:param xatt: ComponentID associated with X axis
:type xatt: :class:`~glue.core.data.ComponentID`
:param yatt: ComponentID associated with Y axis
:type yatt: :class:`~glue.core.data.ComponentID`
Extra kwargs are passed to matplotlib.plot, to
customize plotting
Returns the created MPL artist
"""
self.clear_fit()
self._x = np.asarray(x).ravel()
self._xatt = xatt
self._y = np.asarray(y).ravel()
self._yatt = yatt
if self._artist is not None:
self._artist.remove()
kwargs.setdefault('drawstyle', 'steps-mid')
self._artist = self.axes.plot(x, y, **kwargs)[0]
self._relayout()
self._redraw()
return self._artist
def clear_fit(self):
for a in self._fit_artists:
a.remove()
self._fit_artists = []
if self._resid_artist is not None:
self._resid_artist.remove()
self._resid_artist = None
def connect(self):
connect = self.axes.figure.canvas.mpl_connect
self._down_id = connect('button_press_event', self._on_down)
self._up_id = connect('button_release_event', self._on_up)
self._move_id = connect('motion_notify_event', self._on_move)
def disconnect(self):
off = self.axes.figure.canvas.mpl_disconnect
self._down_id = off(self._down_id)
self._up_id = off(self._up_id)
self._move_id = off(self._move_id)
def _on_down(self, event):
if not event.inaxes:
return
if event.dblclick:
if self.active_grip is not None:
self.active_grip.dblclick(event.xdata, event.ydata)
return
if self.active_grip is not None and self.active_grip.enabled:
self.active_grip.select(event.xdata, event.ydata)
def _on_up(self, event):
if not event.inaxes:
return
if self.active_grip is None or not self.active_grip.enabled:
return
self.active_grip.release()
def _on_move(self, event):
if not event.inaxes or event.button != 1:
return
if self.active_grip is None or not self.active_grip.enabled:
return
self.active_grip.drag(event.xdata, event.ydata)
def _redraw(self):
self.axes.figure.canvas.draw()
def profile_data(self, xlim=None):
if self._x is None or self._y is None:
raise ValueError("Must set profile first")
x = self._x
y = self._y
if xlim is not None:
mask = (min(xlim) <= x) & (x <= max(xlim))
x = x[mask]
y = y[mask]
return x, y
def fit(self, fitter, xlim=None):
try:
x, y = self.profile_data(xlim)
dy = None
except ValueError:
raise ValueError("Must set profile before fitting")
result = fitter.build_and_fit(x, y)
return result, x, y, dy
def plot_fit(self, fitter, fit_result):
self.clear_fit()
x = self._x
y = fitter.predict(fit_result, x)
self._fit_artists = fitter.plot(fit_result, self.axes, x)
resid = self._y - y
self._resid = resid
self._resid_artist, = self.resid_axes.plot(x, resid, 'k')
self.autoscale_ylim()
self._relayout()
def new_value_grip(self, callback=None):
"""
Create and return new ValueGrip
:param callback: A callback function to be invoked
whenever the grip.value property changes
"""
result = self.value_cls(self)
result.value = self._center[0]
if callback is not None:
add_callback(result, 'value', callback)
self.grips.append(result)
self.active_grip = result
return result
def new_range_grip(self, callback=None):
"""
Create and return new RangeGrip
:param callback: A callback function to be invoked
whenever the grip.range property changes
"""
result = self.range_cls(self)
center = self._center[0]
width = self._width
result.range = center - width / 4, center + width / 4
if callback is not None:
add_callback(result, 'range', callback)
self.grips.append(result)
self.active_grip = result
return result
@property
def _center(self):
"""Return the data coordinates of the axes center, as (x, y)"""
xy = self.axes.transAxes.transform([(.5, .5)])
xy = self.axes.transData.inverted().transform(xy)
return tuple(xy.ravel())
@property
def _width(self):
"""Return the X-width of axes in data units"""
xlim = self.axes.get_xlim()
return xlim[1] - xlim[0]
def pick_grip(self, x, y):
"""
Given a coordinate in Data units,
return the enabled Grip object nearest
that point, or None if none are nearby
"""
grips = [h for h in self.grips if h.enabled]
if not grips:
return
dist, grip = min((h.pick_dist(x, y), h)
for h in grips)
if dist < PICK_THRESH:
return grip
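# Illustrative usage (editor's note, not part of the original module):
#
#     from matplotlib.figure import Figure
#     fig = Figure()
#     viewer = ProfileViewer(fig)
#     viewer.set_profile(x, y)            # x, y: 1D array-like profile data
#     grip = viewer.new_range_grip(callback=on_range_change)
#
# The grip is drawn on the profile axes and `on_range_change` (a hypothetical
# single-argument callable) fires whenever the selected range changes.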
| {
"repo_name": "stscieisenhamer/glue",
"path": "glue/plugins/tools/spectrum_tool/qt/profile_viewer.py",
"copies": "4",
"size": "13923",
"license": "bsd-3-clause",
"hash": -7501474625993291000,
"line_mean": 27.2413793103,
"line_max": 76,
"alpha_frac": 0.5493069022,
"autogenerated": false,
"ratio": 3.5800977114939574,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 493
} |
from __future__ import absolute_import, division, print_function
import numpy as np
from mock import MagicMock
from glue import core
from ..data_slice_widget import SliceWidget, DataSlice
class TestSliceWidget(object):
def test_slice_center(self):
s = SliceWidget(lo=0, hi=10)
assert s.slice_center == 5
def test_browse_slice(self):
s = SliceWidget(lo=0, hi=10)
assert s.slice_center == 5
s._ui_slider.button_prev.click()
assert s.slice_center == 4
s._ui_slider.button_next.click()
s._ui_slider.button_next.click()
assert s.slice_center == 6
s._ui_slider.button_first.click()
assert s.slice_center == 0
s._ui_slider.button_prev.click()
assert s.slice_center == 10
s._ui_slider.button_next.click()
assert s.slice_center == 0
s._ui_slider.button_last.click()
assert s.slice_center == 10
s._ui_slider.button_next.click()
assert s.slice_center == 0
s._ui_slider.button_prev.click()
assert s.slice_center == 10
s._ui_slider.button_prev.click()
assert s.slice_center == 9
def test_slice_world(self):
s = SliceWidget(lo=0, hi=5, world=[1, 3, 5, 5.5, 8, 12])
# Check switching between world and pixel coordinates
s.slice_center = 0
assert s.slider_label == '1.0'
s.use_world = False
assert s.slider_label == '0'
s.slice_center = 3
assert s.slider_label == '3'
s.use_world = True
assert s.slider_label == '5.5'
# Round to nearest
s.slider_label = '11'
assert s.slice_center == 5
assert s.slider_label == '12.0'
# Make sure out of bound values work
s.slider_label = '20'
assert s.slice_center == 5
assert s.slider_label == '12.0'
s.slider_label = '-10'
assert s.slice_center == 0
assert s.slider_label == '1.0'
# And disable world and try and set by pixel
s.use_world = False
s.slider_label = '4'
assert s.slice_center == 4
assert s.slider_label == '4'
class TestArraySlice(object):
def test_1d(self):
d = core.Data(x=[1, 2, 3])
s = DataSlice(d)
assert s.slice == ('x',)
def test_2d(self):
d = core.Data(x=[[1]])
s = DataSlice(d)
assert s.slice == ('y', 'x')
def test_3d(self):
d = core.Data(x=np.zeros((3, 3, 3)))
s = DataSlice(d)
assert s.slice == (1, 'y', 'x')
def test_3d_change_mode(self):
d = core.Data(x=np.zeros((3, 4, 5)))
s = DataSlice(d)
changed = MagicMock()
s.slice_changed.connect(changed)
assert s.slice == (1, 'y', 'x')
s._slices[1].mode = 'slice'
assert s.slice == ('y', 1, 'x')
assert changed.call_count == 1
s._slices[2].mode = 'slice'
assert s.slice == ('y', 'x', 2)
assert changed.call_count == 2
s._slices[2].mode = 'y'
assert s.slice == (1, 'x', 'y')
assert changed.call_count == 3
s._slices[2].mode = 'x'
assert s.slice == (1, 'y', 'x')
assert changed.call_count == 4
def test_3d_change_slice(self):
d = core.Data(x=np.zeros((3, 4, 5)))
s = DataSlice(d)
changed = MagicMock()
s.slice_changed.connect(changed)
s._slices[0].slice_center = 2
assert s.slice == (2, 'y', 'x')
assert changed.call_count == 1
s._slices[1].mode = 'slice'
s._slices[1].slice_center = 0
assert s.slice == ('y', 0, 'x')
assert changed.call_count == 3
s._slices[2].mode = 'slice'
assert s.slice == ('y', 'x', 2)
assert changed.call_count == 4
| {
"repo_name": "saimn/glue",
"path": "glue/viewers/common/qt/tests/test_data_slice_widget.py",
"copies": "2",
"size": "3801",
"license": "bsd-3-clause",
"hash": -5031886545041272000,
"line_mean": 27.5789473684,
"line_max": 64,
"alpha_frac": 0.5356485135,
"autogenerated": false,
"ratio": 3.305217391304348,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9840865904804348,
"avg_score": 0,
"num_lines": 133
} |
from __future__ import absolute_import, division, print_function
import numpy as np
from mock import MagicMock, patch
from ...config import settings
from .. import DataCollection, Data, SubsetGroup
from .. import subset
from ..subset import SubsetState
from ..subset_group import coerce_subset_groups
from .test_state import clone
def restore_settings(func):
def wrapper(*args, **kwargs):
settings.reset_defaults()
results = func(*args, **kwargs)
settings.reset_defaults()
return results
return wrapper
class TestSubsetGroup(object):
def setup_method(self, method):
x = Data(label='x', x=[1, 2, 3])
y = Data(label='y', y=[2, 4, 8])
self.dc = DataCollection([x, y])
self.sg = SubsetGroup()
def test_creation(self):
self.sg.register(self.dc)
sg = self.sg
for sub, data in zip(sg.subsets, self.dc):
assert sub is data.subsets[0]
def test_attributes_matched_to_group(self):
self.sg.register(self.dc)
sg = self.sg
for sub in sg.subsets:
assert sub.subset_state is sg.subset_state
assert sub.label is sg.label
def test_attributes_synced_to_group(self):
self.sg.register(self.dc)
sg = self.sg
sg.subsets[0].subset_state = SubsetState()
sg.subsets[0].label = 'testing'
for sub in sg.subsets:
assert sub.subset_state is sg.subset_state
assert sub.label is sg.label
def test_new_subset_group_syncs_style(self):
sg = self.dc.new_subset_group()
for sub in sg.subsets:
assert sub.style == sg.style
def test_changing_subset_style_changes_group(self):
# Test to make sure that if a subset's visual properties are changed,
# the visual properties of all subsets in the same subset group are changed
d1 = Data(x=[1, 2, 3], label='d1')
d2 = Data(y=[2, 3, 4], label='d2')
d3 = Data(y=[2, 3, 4], label='d3')
dc = DataCollection([d1, d2, d3])
sg = dc.new_subset_group(subset_state=d1.id['x'] > 1, label='A')
# Changing d1 subset properties changes group and other subsets
d1.subsets[0].style.color = '#c0b4a1'
assert sg.style.color == '#c0b4a1'
assert d2.subsets[0].style.color == '#c0b4a1'
assert d3.subsets[0].style.color == '#c0b4a1'
d2.subsets[0].style.alpha = 0.2
assert sg.style.alpha == 0.2
assert d1.subsets[0].style.alpha == 0.2
assert d3.subsets[0].style.alpha == 0.2
d3.subsets[0].style.markersize = 16
assert sg.style.markersize == 16
assert d1.subsets[0].style.markersize == 16
assert d2.subsets[0].style.markersize == 16
# Changing subset group changes subsets
sg.style.color = '#abcdef'
assert d1.subsets[0].style.color == '#abcdef'
assert d2.subsets[0].style.color == '#abcdef'
assert d3.subsets[0].style.color == '#abcdef'
sg.style.linewidth = 12
assert d1.subsets[0].style.linewidth == 12
assert d2.subsets[0].style.linewidth == 12
assert d3.subsets[0].style.linewidth == 12
def test_new_data_creates_subset(self):
sg = self.dc.new_subset_group()
d = Data(label='z', z=[10, 20, 30])
self.dc.append(d)
assert d.subsets[0] in sg.subsets
def test_remove_data_deletes_subset(self):
sg = self.dc.new_subset_group()
sub = self.dc[0].subsets[0]
self.dc.remove(self.dc[0])
assert sub not in sg.subsets
def test_subsets_given_data_reference(self):
sg = self.dc.new_subset_group()
assert sg.subsets[0].data is self.dc[0]
def test_data_collection_subset(self):
sg = self.dc.new_subset_group()
assert tuple(self.dc.subset_groups) == (sg,)
sg2 = self.dc.new_subset_group()
assert tuple(self.dc.subset_groups) == (sg, sg2)
def test_remove_subset(self):
sg = self.dc.new_subset_group()
n = len(self.dc[0].subsets)
self.dc.remove_subset_group(sg)
assert len(self.dc[0].subsets) == n - 1
def test_edit_broadcasts(self):
sg = self.dc.new_subset_group()
bcast = MagicMock()
sg.subsets[0].broadcast = bcast
bcast.reset_mock()
sg.subsets[0].style.color = 'red'
assert bcast.call_count == 1
def test_braodcast(self):
sg = self.dc.new_subset_group()
bcast = MagicMock()
sg.subsets[0].broadcast = bcast
bcast.reset_mock()
sg.subset_state = SubsetState()
assert bcast.call_count == 1
sg.style.color = '#123456'
assert bcast.call_count == 2
sg.label = 'new label'
assert bcast.call_count == 3
def test_auto_labeled(self):
sg = self.dc.new_subset_group()
assert sg.label is not None
def test_label_color_cycle(self):
sg1 = self.dc.new_subset_group()
sg2 = self.dc.new_subset_group()
assert sg1.label != sg2.label
assert sg1.style.color != sg2.style.color
def test_new_label(self):
sg = self.dc.new_subset_group(label='test')
assert sg.label == 'test'
def test_new_state(self):
state = SubsetState()
sg = self.dc.new_subset_group(subset_state=state)
assert sg.subset_state is state
def test_deleted_subsets_dont_respawn(self):
# regression test
sg1 = self.dc.new_subset_group()
self.dc.remove_subset_group(sg1)
d = Data(label='z', z=[1, 2, 3])
self.dc.append(d)
assert len(d.subsets) == 0
class TestSerialze(TestSubsetGroup):
def test_save_group(self):
sg = self.dc.new_subset_group()
sg2 = clone(sg)
assert sg.style == sg2.style
assert sg.label == sg2.label
def test_save_subset(self):
sg = self.dc.new_subset_group()
sg.subset_state = self.dc[0].id['x'] > 1
sub = sg.subsets[0]
dc = clone(self.dc)
sub2 = dc[0].subsets[0]
np.testing.assert_array_equal(sub2.to_mask(), [False, True, True])
assert sub2.style == sg.style
assert sub2.label == sg.label
def test_save_override(self):
sg = self.dc.new_subset_group()
sg.subsets[0].style.color = 'blue'
dc = clone(self.dc)
assert dc.subset_groups[0].style == sg.style
assert dc.subset_groups[0].subsets[0].style.color == 'blue'
class TestCombination(object):
def check_type_and_children(self, s1, s2, s3, statetype):
assert isinstance(s3, statetype)
assert s3.state1 is s1.subset_state
assert s3.state2 is s2.subset_state
def test_and(self):
s1, s2 = SubsetGroup(), SubsetGroup()
assert isinstance(s1 & s2, subset.AndState)
def test_or(self):
s1, s2 = SubsetGroup(), SubsetGroup()
assert isinstance(s1 | s2, subset.OrState)
def test_xor(self):
s1, s2 = SubsetGroup(), SubsetGroup()
assert isinstance(s1 ^ s2, subset.XorState)
def test_invert(self):
s1 = SubsetGroup()
assert isinstance(~s1, subset.InvertState)
class TestCoerce(object):
def setup_method(self, method):
self.x = Data(label='x', x=[1, 2, 3])
self.y = Data(label='y', y=[1, 2, 3])
self.dc = DataCollection([self.x, self.y])
def test_noop_on_good_setup(self):
with patch('glue.core.subset_group.warn') as warn:
coerce_subset_groups(self.dc)
assert warn.call_count == 0
def test_reassign_non_grouped_subsets(self):
s = self.x.new_subset()
dc = self.dc
with patch('glue.core.subset_group.warn') as warn:
coerce_subset_groups(dc)
assert len(dc.subset_groups) == 1
assert dc.subset_groups[0].subset_state is s.subset_state
assert dc.subset_groups[0].style == s.style
assert dc.subset_groups[0].label == s.label
assert warn.call_count == 1
| {
"repo_name": "stscieisenhamer/glue",
"path": "glue/core/tests/test_subset_group.py",
"copies": "2",
"size": "8031",
"license": "bsd-3-clause",
"hash": -4894715166341256000,
"line_mean": 30.1279069767,
"line_max": 83,
"alpha_frac": 0.5968123521,
"autogenerated": false,
"ratio": 3.3532359081419623,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9949817547801948,
"avg_score": 0.00004614248800295312,
"num_lines": 258
} |
from __future__ import absolute_import, division, print_function
import numpy as np
from mock import MagicMock, patch
from .. import DataCollection, Data, SubsetGroup
from .. import subset
from ..subset import SubsetState
from ..subset_group import coerce_subset_groups
from .test_state import clone
class TestSubsetGroup(object):
def setup_method(self, method):
x = Data(label='x', x=[1, 2, 3])
y = Data(label='y', y=[2, 4, 8])
self.dc = DataCollection([x, y])
self.sg = SubsetGroup()
def test_creation(self):
self.sg.register(self.dc)
sg = self.sg
for sub, data in zip(sg.subsets, self.dc):
assert sub is data.subsets[0]
def test_attributes_matched_to_group(self):
self.sg.register(self.dc)
sg = self.sg
for sub in sg.subsets:
assert sub.subset_state is sg.subset_state
assert sub.label is sg.label
def test_attributes_synced_to_group(self):
self.sg.register(self.dc)
sg = self.sg
sg.subsets[0].subset_state = SubsetState()
sg.subsets[0].label = 'testing'
for sub in sg.subsets:
assert sub.subset_state is sg.subset_state
assert sub.label is sg.label
def test_set_style_overrides(self):
self.sg.register(self.dc)
sg = self.sg
sg.subsets[0].style.color = 'blue'
for sub in sg.subsets[1:]:
assert sub.style.color != 'blue'
assert sg.subsets[0].style.color == 'blue'
def test_new_subset_group_syncs_style(self):
sg = self.dc.new_subset_group()
for sub in sg.subsets:
assert sub.style == sg.style
def test_set_group_style_clears_override(self):
sg = self.dc.new_subset_group()
style = sg.style.copy()
style.parent = sg.subsets[0]
sg.subsets[0].style = style
style.color = 'blue'
sg.style.color = 'red'
assert sg.subsets[0].style.color == 'red'
def test_new_data_creates_subset(self):
sg = self.dc.new_subset_group()
d = Data(label='z', z=[10, 20, 30])
self.dc.append(d)
assert d.subsets[0] in sg.subsets
def test_remove_data_deletes_subset(self):
sg = self.dc.new_subset_group()
sub = self.dc[0].subsets[0]
self.dc.remove(self.dc[0])
assert sub not in sg.subsets
def test_subsets_given_data_reference(self):
sg = self.dc.new_subset_group()
assert sg.subsets[0].data is self.dc[0]
def test_data_collection_subset(self):
sg = self.dc.new_subset_group()
assert tuple(self.dc.subset_groups) == (sg,)
sg2 = self.dc.new_subset_group()
assert tuple(self.dc.subset_groups) == (sg, sg2)
def test_remove_subset(self):
sg = self.dc.new_subset_group()
n = len(self.dc[0].subsets)
self.dc.remove_subset_group(sg)
assert len(self.dc[0].subsets) == n - 1
def test_edit_broadcasts(self):
sg = self.dc.new_subset_group()
bcast = MagicMock()
sg.subsets[0].broadcast = bcast
bcast.reset_mock()
sg.subsets[0].style.color = 'red'
assert bcast.call_count == 1
def test_braodcast(self):
sg = self.dc.new_subset_group()
bcast = MagicMock()
sg.subsets[0].broadcast = bcast
bcast.reset_mock()
sg.subset_state = SubsetState()
assert bcast.call_count == 1
sg.style.color = '#123456'
assert bcast.call_count == 2
sg.label = 'new label'
assert bcast.call_count == 3
def test_auto_labeled(self):
sg = self.dc.new_subset_group()
assert sg.label is not None
def test_label_color_cycle(self):
sg1 = self.dc.new_subset_group()
sg2 = self.dc.new_subset_group()
assert sg1.label != sg2.label
assert sg1.style.color != sg2.style.color
def test_new_label(self):
sg = self.dc.new_subset_group(label='test')
assert sg.label == 'test'
def test_new_state(self):
state = SubsetState()
sg = self.dc.new_subset_group(subset_state=state)
assert sg.subset_state is state
def test_deleted_subsets_dont_respawn(self):
# regression test
sg1 = self.dc.new_subset_group()
self.dc.remove_subset_group(sg1)
d = Data(label='z', z=[1, 2, 3])
self.dc.append(d)
assert len(d.subsets) == 0
class TestSerialze(TestSubsetGroup):
def test_save_group(self):
sg = self.dc.new_subset_group()
sg2 = clone(sg)
assert sg.style == sg2.style
assert sg.label == sg2.label
def test_save_subset(self):
sg = self.dc.new_subset_group()
sg.subset_state = self.dc[0].id['x'] > 1
sub = sg.subsets[0]
dc = clone(self.dc)
sub2 = dc[0].subsets[0]
np.testing.assert_array_equal(sub2.to_mask(), [False, True, True])
assert sub2.style == sg.style
assert sub2.label == sg.label
def test_save_override(self):
sg = self.dc.new_subset_group()
sg.subsets[0].style.color = 'blue'
dc = clone(self.dc)
assert dc.subset_groups[0].style == sg.style
assert dc.subset_groups[0].subsets[0].style.color == 'blue'
class TestCombination(object):
def check_type_and_children(self, s1, s2, s3, statetype):
assert isinstance(s3, statetype)
assert s3.state1 is s1.subset_state
assert s3.state2 is s2.subset_state
def test_and(self):
s1, s2 = SubsetGroup(), SubsetGroup()
assert isinstance(s1 & s2, subset.AndState)
def test_or(self):
s1, s2 = SubsetGroup(), SubsetGroup()
assert isinstance(s1 | s2, subset.OrState)
def test_xor(self):
s1, s2 = SubsetGroup(), SubsetGroup()
assert isinstance(s1 ^ s2, subset.XorState)
def test_invert(self):
s1 = SubsetGroup()
assert isinstance(~s1, subset.InvertState)
class TestCoerce(object):
def setup_method(self, method):
self.x = Data(label='x', x=[1, 2, 3])
self.y = Data(label='y', y=[1, 2, 3])
self.dc = DataCollection([self.x, self.y])
def test_noop_on_good_setup(self):
with patch('glue.core.subset_group.warn') as warn:
coerce_subset_groups(self.dc)
assert warn.call_count == 0
def test_reassign_non_grouped_subsets(self):
s = self.x.new_subset()
dc = self.dc
with patch('glue.core.subset_group.warn') as warn:
coerce_subset_groups(dc)
assert len(dc.subset_groups) == 1
assert dc.subset_groups[0].subset_state is s.subset_state
assert dc.subset_groups[0].style == s.style
assert dc.subset_groups[0].label == s.label
assert warn.call_count == 1
| {
"repo_name": "saimn/glue",
"path": "glue/core/tests/test_subset_group.py",
"copies": "2",
"size": "6829",
"license": "bsd-3-clause",
"hash": -6937184065590735000,
"line_mean": 29.6233183857,
"line_max": 74,
"alpha_frac": 0.5929125787,
"autogenerated": false,
"ratio": 3.33447265625,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.992738523495,
"avg_score": 0,
"num_lines": 223
} |
from __future__ import absolute_import, division, print_function
import numpy as np
from numpy import (
sqrt,
minimum,
)
try:
_ = np.use_fastnumpy # Use Enthought MKL optimizations
from numpy.fft import rfft, irfft, rfftfreq
except AttributeError:
try:
import mklfft # MKL FFT optimizations from Continuum Analytics
from numpy.fft import rfft, irfft, rfftfreq
except ImportError:
# Finally, just use Numpy's and Scipy's
from numpy.fft import rfft, irfft, rfftfreq
from scipy.signal import fftconvolve
class EC(object):
"""Equalization-Cancellation process used by the STEC model [wan2014]_.
The `equalize` method finds the optimal gains and delays that minimizes
the energy of the cancelled signal.
The `cancel` method uses the gains and delays found by the `equalize`
method to "cancel" the two signals.
The `jitter` method applies amplitude and time jitters to the input as a
form of internal noise.
Examples
--------
    >>> ec = EC(fs)
>>> alphas, taus = ec.equalize(left, right, cf)
>>> y = ec.cancel(left, right, alphas, taus)
References
----------
.. [wan2014] Wan, R., Durlach, N. I., and Colburn, H. S. (2014).
"Application of a short-time version of the Equalization-Cancellation
model to speech intelligibility experiments with speech maskers",
The Journal of the Acoustical Society of America, 136(2), 768--776
"""
def __init__(self, fs, win_len=None, overlap=0.5, sigma_e=0.25,
sigma_d=105e-6, padding_windows=10, fast_cancel=True):
"""Equalization--Cancellation process.
Parameters
-----------
fs : int
Sampling frequency of the EC process.
win_len : float
Duration of a window, in seconds, where to apply the EC process. If
`None`, the EC process is applied to the whole signal. Defaults to
`None`.
overlap : float
Overlap between windows, in fraction of window. Defaults to 0.5 (i.e.
50%).
        sigma_e : float
            Standard deviation of the amplitude jitter in the EC process.
            Default is 0.25, as reported by Durlach (1963).
        sigma_d : float
            Standard deviation of the time jitter, in seconds. Default is
            105 us, as reported by Durlach (1963).
        padding_windows : int
            With ``fast_cancel``, number of extra windows of signal taken on
            each side of the current window when applying the delay. Defaults
            to 10.
        fast_cancel : bool
            If `True` (default), apply the cancellation delay only to a padded
            slice around the current window rather than to the whole signal,
            which is faster for long signals.
"""
self.fs = fs
self.win_len = win_len
self.overlap = overlap
self.sigma_e = sigma_e
self.sigma_d = sigma_d
self.padding_windows = padding_windows
self.fast_cancel = fast_cancel
def equalize(self, left, right, cf):
"""Finds the optimal gains and delays that minimize the energy of the
cancelled signals.
Parameters
----------
left, right : ndarrays
Signals for which to find the optimal parameters. They can be 1D
or 2D. If they are 2D, the signals are cancelled along the last
dimension.
cf : float or list of floats
Center frequency of the channel at which the equalization takes
place. If the inputs are multi-channel, then cf must be a list of
center frequencies.
Returns
-------
        alphas : ndarray
            Optimal gains. The shape depends on the input signals and on the
            ``win_len`` and ``overlap`` attributes.
        taus : ndarray
            Optimal delays, in seconds. The shape depends on the input signals
            and on the ``win_len`` and ``overlap`` attributes.
"""
left = np.asanyarray(left, dtype='float')
right = np.asanyarray(right, dtype='float')
if left.ndim > 2 or right.ndim > 2:
raise ValueError("Input signals must have at most 2 dimensions.",
left.ndim, right.ndim)
if left.shape != right.shape:
raise ValueError("Both inputs must have must have the same shape.",
left.shape, right.shape)
if left.ndim == 2:
try:
if len(cf) != left.shape[0]:
raise ValueError("cf must have as many values as there "
"are channels in the inputs.")
except TypeError:
raise ValueError("cf must be iterable if there are more than "
"one channel.")
if left.ndim == 1 and right.ndim == 1:
# Use the whole signal.
alphas, taus = self._equalization(left, right, cf)
else: # the inputs are 2D
alphas = []
taus = []
for i_chan, cf in enumerate(cf):
chan_alphas, chan_taus = self._equalization(
left[i_chan], right[i_chan], cf)
alphas.append(chan_alphas)
taus.append(chan_taus)
alphas = np.array(alphas)
taus = np.asarray(taus)
return alphas, taus
def _equalization(self, left, right, cf):
"""Equalize two signals.
Parameters
----------
left, right: array
Single dimension array for left and right signal.
cf : float
Center frequency at which the equalization takes place.
Returns
-------
alphas : array
Gains for each window.
taus : array
Time delays for each window, in seconds.
Notes
-----
The window duration is set by the attribute ``win_len``, in seconds,
and the overlap between windows by ``overlap``, in fraction (e.g. 0.5
for 50 % overlap).
"""
n = left.shape[-1]
if self.win_len is None:
win = n
step = n
else:
win = int(self.win_len * self.fs)
if self.overlap:
step = int(win * self.overlap)
else:
step = win
n_valid_windows = self._n_valid_windows(n, win, step)
alphas = np.zeros(n_valid_windows)
taus = np.zeros(n_valid_windows)
for i_frame, hop in enumerate(range(0, n - win + 1, step)):
a, tau = self._calculate_alpha_tau(left[hop:hop + win],
right[hop:hop + win], cf=cf)
alphas[i_frame] = a
taus[i_frame] = tau
return alphas, taus
@staticmethod
def _n_valid_windows(n_samples, win_len, step):
"""Calculate the number of valid windows, considering overlap.
Parameters
----------
n_samples : int
Length of vector.
win_len : int
Window length, in samples.
step : int
Number of samples between frames, essentially overlap * window
length.
Returns
-------
n_windows : int
Number of valid windows.
"""
valid = np.maximum(n_samples - win_len, 0)
n_windows = valid // step
return n_windows + 1
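    # Worked example (editor's note): for a 1 s signal at fs = 8000 Hz, a
    # 20 ms window (win = 160 samples) and 50 % overlap (step = 80 samples),
    # _n_valid_windows(8000, 160, 80) == (8000 - 160) // 80 + 1 == 99.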
def _calculate_alpha_tau(self, left, right, cf):
"""Finds optimal parameters for the EC process.
Performs equations (1) in Wan et al. (2014).
Parameters
----------
left, right : ndarray
w : float
Center frequency of the channel, in Hz.
Returns
-------
a : float
Level equalization parameter
tau : float
Delay, in seconds, that should be applied to `right` in order to
            get close to `left`. Could also be explained as the delay
applied to `left`, with respect to `right`.
"""
E_L = left.dot(left.T)
E_R = right.dot(right.T)
# Alpha parameter for level equalization
alpha = sqrt(E_L / E_R)
tau = self._find_tau(left, right, cf)
return alpha, tau
def _find_tau(self, left, right, cf):
""" Returns the delay (in seconds) of the maximum of the cross-correlation
of two signals.
"""
left = np.asanyarray(left)
right = np.asanyarray(right)
left = left - np.mean(left)
right = right - np.mean(right)
if left.dot(left) == 0 or right.dot(right) == 0:
return 0
else:
n_samples = left.shape[-1]
# Cross correlation
# It should be normalized, according to the definition, but
# we only need the max value, so it is not necessary to compute it.
rho = fftconvolve(left, right[::-1], 'full')
# Eq 6, we have to find tau_0 in the range where |tau| < fs / cf_0
# i.e. (pi / omega_0)
max_delay_in_samples = minimum(
np.floor(np.pi / (2 * np.pi * cf) * self.fs),
n_samples // 2)
# First we limit the range to -fs/cf_0 < tau < fs/cf_0...
allowed_range = np.arange(-max_delay_in_samples,
max_delay_in_samples + 1, dtype=int)
# ... then we find where the maximum is that range.
tau = allowed_range[rho[allowed_range + n_samples - 1].argmax()]
return tau / self.fs
def cancel(self, left, right, alpha, tau):
"""Cancel left and right signal using gains and delays.
Parameters
----------
left, right : array_like
Signals for which to find the optimal parameters. They can be 1D
or 2D. If they are 2D, the signals are cancelled along the last
dimension.
alpha : ndarray
Optimal amplitude cancellation gains.
tau : ndarray
Optimal cancellation delays.
Returns
-------
y : ndarray
"""
left = np.asanyarray(left, dtype='float')
right = np.asanyarray(right, dtype='float')
alpha = np.asanyarray(alpha)
tau = np.asanyarray(tau)
if left.ndim > 2 or right.ndim > 2:
raise ValueError("Input signals must have at most 2 dimensions.",
left.ndim, right.ndim)
if left.shape != right.shape:
raise ValueError("Both inputs must have must have the same shape.",
left.shape, right.shape)
if left.ndim == 1 and right.ndim == 1:
out = self._single_chan_cancel(left, right, alpha, tau)
else: # the inputs are 2D
out = np.zeros_like(left)
for i_chan, (chan_alpha, chan_tau) in enumerate(zip(alpha, tau)):
out[i_chan, :] = self._single_chan_cancel(
left[i_chan],
right[i_chan],
chan_alpha,
chan_tau)
return out
def _single_chan_cancel(self, left, right, alphas, taus):
"""Equalize two signals.
Parameters
----------
left, right: ndarrays
Single dimension array for left and right signal.
alphas : ndarray
Gains for each window.
taus : ndarray
            Time delays for each window, in seconds.
Returns
-------
out : ndarray
Cancelled signals.
Notes
-----
        The window duration is set by the attribute ``win_len``, in seconds,
and the overlap between windows by `overlap`, in fraction (e.g. 0.5
for 50 % overlap).
"""
n = left.shape[-1]
if self.win_len is None:
win = n
step = n
# Make sure the alphas and taus are iterable.
try:
iter(alphas)
except TypeError:
alphas = (alphas,)
try:
iter(taus)
except TypeError:
taus = (taus,)
else:
win = int(self.win_len * self.fs)
if self.overlap:
step = int(win * self.overlap)
else:
step = win
out = np.zeros_like(left)
extra = self.padding_windows * win
for i_frame, (a, tau, hop) in enumerate(
zip(alphas, taus, range(0, n - win + 1, step))):
if tau == 0:
out[hop:hop + win] += 1 / sqrt(a) * left[hop:hop + win] \
- sqrt(a) * right[hop:hop + win]
else:
if self.fast_cancel:
                    # Shift only a section of the signal, instead of the
                    # entire signal. The "window" size is defined by the `padding_windows`
# parameter. The size of the original window is increased
# by 2*padding_windows (one before, one after).
lower = np.maximum(hop - extra, 0)
if lower == 0:
new_hop = hop
else:
new_hop = extra
upper = np.minimum(hop + win + extra, n)
out[hop:hop + win] += (
1 / sqrt(a) * self._shift(left[lower:upper], -tau / 2)
- sqrt(a) * self._shift(right[lower:upper], tau / 2)
)[new_hop:new_hop + win]
else:
out[hop:hop + win] += 1 / sqrt(a) \
* self._shift(left, -tau / 2)[hop:hop + win] \
- sqrt(a) * self._shift(right, tau / 2)[hop:hop + win]
if self.overlap:
out *= self.overlap
return out
def _shift(self, x, delay):
"""Shift signal according to a delay and pads with zeros.
Parameters
----------
x : array
Signal.
        delay : float
Delay in seconds. Positive values correspond to a delay in time,
i.e. the signal "starts later". Negative values correspond to a
signal starting "earlier".
Returns
-------
out : ndarray
Delayed signal
"""
n = x.shape[-1]
y = rfft(x)
w = rfftfreq(n, 1 / self.fs) * 2 * np.pi
y *= np.exp(-1j * w * delay)
return np.real(irfft(y, n))
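    # Worked example (editor's note): the phase ramp above implements a
    # fractional delay. For fs = 8000 and delay = 1 / 8000 (one sample), a
    # unit impulse at index 0 comes back, up to numerical error, at index 1:
    #
    #     ec = EC(fs=8000)
    #     x = np.zeros(16)
    #     x[0] = 1.0
    #     np.argmax(ec._shift(x, 1 / 8000))   # -> 1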
def jitter(self, x, out=None):
"""Applies amplitude and time jitter to a signal.
Parameters
----------
x : array_like
Input signal, will be casted to 'float'. It can be one or 2
dimensional.
out : None or array_like
Define where to write the jitter signal. Defaults to `None`,
i.e. creates a new array. Can be used to jitter an array "in
place".
Returns
-------
out : ndarray
Jittered signal.
Notes
-----
        The amplitude jitters are drawn from a Gaussian distribution with a
        mean of zero and a standard deviation of ``sigma_e``. The time jitters
        are drawn from a Gaussian distribution with mean zero and standard
        deviation ``sigma_d``, in seconds. The default jitter
values come from [durlach1963]_.
References
----------
.. [durlach1963] Durlach, N. I. (1963). "Equalization and
Cancellation Theory of Binaural Masking-Level Differences", J. Acoust.
        Soc. Am., 35, 1206--1218
"""
x = np.asanyarray(x, dtype='float')
epsilons, deltas = self.create_jitter(x)
out = self.apply_jitter(x, epsilons, deltas, out=out)
return out
def create_jitter(self, x):
"""Create amplitude and time jitter for a signal.
Parameters
----------
x : ndarray
Input signal.
Returns
-------
        epsilons : ndarray of floats
            Amplitude jitters.
        deltas : ndarray of floats
            Time jitters, in samples (fractional; `apply_jitter` rounds them
            to indices).
"""
n_x = x.shape[-1]
# Amplitude jitter
a_jitter = self.sigma_e * np.random.randn(*x.shape)
# Time jitter
if x.ndim > 1:
idx = np.tile(np.arange(n_x, dtype='float'), (x.shape[0], 1))
else:
idx = np.arange(n_x, dtype='float')
t_jitter = self.sigma_d * self.fs * np.random.randn(*idx.shape)
return a_jitter, t_jitter
@staticmethod
def apply_jitter(x, epsilons, deltas, out=None):
"""Apply jitter to a signal
Parameters
----------
x : ndarray
Input signal.
epsilons : ndarray of floats
Amplitude jitter coefficients.
        deltas : ndarray of floats
            Time jitters, in samples; they are added to the sample indices,
            clipped to the signal length and rounded before indexing.
out : array or None
Array where to write the output. If None, which is the default,
the function returns a new array.
Returns
-------
out : ndarray
Jittered signal.
"""
n_cf = x.shape[0]
n_x = x.shape[-1]
if x.ndim > 1:
chan_idx = np.tile(np.arange(n_cf)[np.newaxis].T, (1, n_x))
idx = np.tile(np.arange(n_x, dtype='float'), (x.shape[0], 1))
else:
# Single channel
chan_idx = Ellipsis
idx = np.arange(n_x, dtype='float')
# Apply the jitter to the idx.
idx += deltas
# Limit the indices to the length of the array
idx = np.clip(idx, 0, n_x - 1, out=idx)
idx = np.round(idx, out=idx).astype('int')
# Create indices for channels, it's a n_cf x n_x array, where each row
# is filled with the row number.
# Same for the "ear" dimension
# ear_idx = np.tile(np.arange(2)[np.newaxis].T, (n_cf, 1, n_x))
if out is None:
out = x * (1 - epsilons)
out[..., :] = out[chan_idx, idx]
else:
x *= (1 - epsilons)
x[..., :] = x[chan_idx, idx]
out[...] = x
return out
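# Illustrative usage (editor's note, not part of the original module): a
# short-time EC pass over one critical band, with assumed parameter values.
#
#     fs = 22050
#     ec = EC(fs, win_len=0.02, overlap=0.5)
#     left = np.random.randn(fs)             # 1 s of noise per ear
#     right = np.random.randn(fs)
#     alphas, taus = ec.equalize(left, right, cf=500.0)
#     cancelled = ec.cancel(left, right, alphas, taus)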
| {
"repo_name": "achabotl/pambox",
"path": "pambox/central/ec.py",
"copies": "1",
"size": "17944",
"license": "bsd-3-clause",
"hash": 4466792650374887000,
"line_mean": 32.7928436911,
"line_max": 85,
"alpha_frac": 0.5207311636,
"autogenerated": false,
"ratio": 4.163341067285383,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5184072230885383,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import numpy as np
from numpy.lib import NumpyVersion
from .common import Benchmark
class Random(Benchmark):
params = ['normal', 'uniform', 'weibull 1', 'binomial 10 0.5',
'poisson 10']
def setup(self, name):
items = name.split()
name = items.pop(0)
params = [float(x) for x in items]
self.func = getattr(np.random, name)
self.params = tuple(params) + ((100, 100),)
def time_rng(self, name):
self.func(*self.params)
class Shuffle(Benchmark):
def setup(self):
self.a = np.arange(100000)
def time_100000(self):
np.random.shuffle(self.a)
class Randint(Benchmark):
def time_randint_fast(self):
"""Compare to uint32 below"""
np.random.randint(0, 2 ** 30, size=10 ** 5)
def time_randint_slow(self):
"""Compare to uint32 below"""
np.random.randint(0, 2 ** 30 + 1, size=10 ** 5)
class Randint_dtype(Benchmark):
high = {
'bool': 1,
'uint8': 2 ** 7,
'uint16': 2 ** 15,
'uint32': 2 ** 31,
'uint64': 2 ** 63
}
param_names = ['dtype']
params = ['bool', 'uint8', 'uint16', 'uint32', 'uint64']
def setup(self, name):
if NumpyVersion(np.__version__) < '1.11.0.dev0':
raise NotImplementedError
def time_randint_fast(self, name):
high = self.high[name]
np.random.randint(0, high, size=10 ** 5, dtype=name)
def time_randint_slow(self, name):
high = self.high[name]
np.random.randint(0, high + 1, size=10 ** 5, dtype=name)
| {
"repo_name": "DailyActie/Surrogate-Model",
"path": "01-codes/numpy-master/benchmarks/benchmarks/bench_random.py",
"copies": "1",
"size": "1645",
"license": "mit",
"hash": 6788555274493448000,
"line_mean": 24.3076923077,
"line_max": 66,
"alpha_frac": 0.5696048632,
"autogenerated": false,
"ratio": 3.37782340862423,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.444742827182423,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import numpy as np
from numpy.lib.stride_tricks import as_strided
import pandas as pd
from glue.external.six import string_types
__all__ = ['unique', 'shape_to_string', 'view_shape', 'stack_view',
'coerce_numeric', 'check_sorted', 'broadcast_to', 'unbroadcast']
def unbroadcast(array):
"""
Given an array, return a new array that is the smallest subset of the
original array that can be re-broadcasted back to the original array.
See https://stackoverflow.com/questions/40845769/un-broadcasting-numpy-arrays
for more details.
"""
if array.ndim == 0:
return array
new_shape = np.where(np.array(array.strides) == 0, 1, array.shape)
return as_strided(array, shape=new_shape)
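# Example (editor's note, illustrative; np.broadcast_to requires Numpy 1.10+,
# the compatibility `broadcast_to` defined below works as well):
#
#     >>> b = np.broadcast_to(np.arange(3), (4, 3))
#     >>> unbroadcast(b).shape
#     (1, 3)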
def unique(array):
"""
    Return the unique elements U of the array, as well as
    the index array I such that U[I] == array
Parameters
----------
array : `numpy.ndarray`
The array to use
Returns
-------
U : `numpy.ndarray`
The unique elements of the array
I : `numpy.ndarray`
The indices such that ``U[I] == array``
"""
# numpy.unique doesn't handle mixed-types on python3,
# so we use pandas
U, I = pd.factorize(array, sort=True)
return I, U
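# Example (editor's note, illustrative):
#
#     >>> U, I = unique(np.array(['b', 'a', 'c', 'b']))
#     >>> list(U), list(I)
#     (['a', 'b', 'c'], [1, 0, 2, 1])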
def shape_to_string(shape):
"""
On Windows, shape tuples use long ints which results in formatted shapes
such as (2L, 3L). This function ensures that the shape is always formatted
without the Ls.
"""
return "({0})".format(", ".join(str(int(item)) for item in shape))
def view_shape(shape, view):
"""
Return the shape of a view of an array.
Returns equivalent of ``np.zeros(shape)[view].shape``
Parameters
----------
shape : tuple
The shape of the array
view : slice
A valid index into a Numpy array, or None
"""
if view is None:
return shape
shp = tuple(slice(0, s, 1) for s in shape)
xy = np.broadcast_arrays(*np.ogrid[shp])
assert xy[0].shape == shape
return xy[0][view].shape
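# Example (editor's note, illustrative):
#
#     >>> view_shape((3, 4, 5), np.s_[0, :, ::2])
#     (4, 3)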
def stack_view(shape, *views):
shp = tuple(slice(0, s, 1) for s in shape)
result = np.broadcast_arrays(*np.ogrid[shp])
for v in views:
if isinstance(v, string_types) and v == 'transpose':
result = [r.T for r in result]
continue
result = [r[v] for r in result]
return tuple(result)
def coerce_numeric(arr):
"""
Coerce an array into a numeric array, replacing non-numeric elements with
nans.
If the array is already a numeric type, it is returned unchanged
Parameters
----------
arr : `numpy.ndarray`
The array to coerce
"""
# already numeric type
if np.issubdtype(arr.dtype, np.number):
return arr
if np.issubdtype(arr.dtype, np.bool_):
return arr.astype(np.int)
# a string dtype, or anything else
try:
return pd.to_numeric(arr, errors='coerce')
except AttributeError: # pandas < 0.19
return pd.Series(arr).convert_objects(convert_numeric=True).values
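# Example (editor's note, illustrative; exact repr may vary by numpy version):
# non-numeric entries become NaN.
#
#     >>> coerce_numeric(np.array(['1', '2', 'a']))
#     array([  1.,   2.,  nan])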
def check_sorted(array):
"""
Return `True` if the array is sorted, `False` otherwise.
"""
# this ignores NANs, and does the right thing if nans
# are concentrated at beginning or end of array
# otherwise, it will miss things at nan/finite boundaries
array = np.asarray(array)
return not (array[:-1] > array[1:]).any()
def pretty_number(numbers):
"""
Convert a list/array of numbers into a nice list of strings
Parameters
----------
numbers : list
The numbers to convert
"""
try:
return [pretty_number(n) for n in numbers]
except TypeError:
pass
n = numbers
if n == 0:
result = '0'
elif (abs(n) < 1e-3) or (abs(n) > 1e3):
result = "%0.3e" % n
elif abs(int(n) - n) < 1e-3 and int(n) != 0:
result = "%i" % n
else:
result = "%0.3f" % n
if result.find('.') != -1:
result = result.rstrip('0')
return result
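# Example (editor's note, illustrative):
#
#     >>> pretty_number([0, 3, 0.5, 1e-4])
#     ['0', '3', '0.5', '1.000e-04']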
def broadcast_to(array, shape):
"""
Compatibility function - can be removed once we support only Numpy 1.10
and above
"""
try:
return np.broadcast_to(array, shape)
except AttributeError:
array = np.asarray(array)
return np.broadcast_arrays(array, np.ones(shape, array.dtype))[0]
| {
"repo_name": "stscieisenhamer/glue",
"path": "glue/utils/array.py",
"copies": "1",
"size": "4429",
"license": "bsd-3-clause",
"hash": 7107556927421325000,
"line_mean": 24.6011560694,
"line_max": 81,
"alpha_frac": 0.6089410702,
"autogenerated": false,
"ratio": 3.6785714285714284,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4787512498771428,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import numpy as np
from numpy.testing import (assert_allclose,
assert_equal, assert_almost_equal, assert_raises)
from scipy.spatial import procrustes
class TestProcrustes(object):
def setup_method(self):
"""creates inputs"""
# an L
self.data1 = np.array([[1, 3], [1, 2], [1, 1], [2, 1]], 'd')
# a larger, shifted, mirrored L
self.data2 = np.array([[4, -2], [4, -4], [4, -6], [2, -6]], 'd')
# an L shifted up 1, right 1, and with point 4 shifted an extra .5
# to the right
# pointwise distance disparity with data1: 3*(2) + (1 + 1.5^2)
self.data3 = np.array([[2, 4], [2, 3], [2, 2], [3, 2.5]], 'd')
# data4, data5 are standardized (trace(A*A') = 1).
# procrustes should return an identical copy if they are used
# as the first matrix argument.
shiftangle = np.pi / 8
self.data4 = np.array([[1, 0], [0, 1], [-1, 0],
[0, -1]], 'd') / np.sqrt(4)
self.data5 = np.array([[np.cos(shiftangle), np.sin(shiftangle)],
[np.cos(np.pi / 2 - shiftangle),
np.sin(np.pi / 2 - shiftangle)],
[-np.cos(shiftangle),
-np.sin(shiftangle)],
[-np.cos(np.pi / 2 - shiftangle),
-np.sin(np.pi / 2 - shiftangle)]],
'd') / np.sqrt(4)
def test_procrustes(self):
# tests procrustes' ability to match two matrices.
#
# the second matrix is a rotated, shifted, scaled, and mirrored version
# of the first, in two dimensions only
#
# can shift, mirror, and scale an 'L'?
a, b, disparity = procrustes(self.data1, self.data2)
assert_allclose(b, a)
assert_almost_equal(disparity, 0.)
# if first mtx is standardized, leaves first mtx unchanged?
m4, m5, disp45 = procrustes(self.data4, self.data5)
assert_equal(m4, self.data4)
# at worst, data3 is an 'L' with one point off by .5
m1, m3, disp13 = procrustes(self.data1, self.data3)
#assert_(disp13 < 0.5 ** 2)
def test_procrustes2(self):
# procrustes disparity should not depend on order of matrices
m1, m3, disp13 = procrustes(self.data1, self.data3)
m3_2, m1_2, disp31 = procrustes(self.data3, self.data1)
assert_almost_equal(disp13, disp31)
# try with 3d, 8 pts per
rand1 = np.array([[2.61955202, 0.30522265, 0.55515826],
[0.41124708, -0.03966978, -0.31854548],
[0.91910318, 1.39451809, -0.15295084],
[2.00452023, 0.50150048, 0.29485268],
[0.09453595, 0.67528885, 0.03283872],
[0.07015232, 2.18892599, -1.67266852],
[0.65029688, 1.60551637, 0.80013549],
[-0.6607528, 0.53644208, 0.17033891]])
rand3 = np.array([[0.0809969, 0.09731461, -0.173442],
[-1.84888465, -0.92589646, -1.29335743],
[0.67031855, -1.35957463, 0.41938621],
[0.73967209, -0.20230757, 0.52418027],
[0.17752796, 0.09065607, 0.29827466],
[0.47999368, -0.88455717, -0.57547934],
[-0.11486344, -0.12608506, -0.3395779],
[-0.86106154, -0.28687488, 0.9644429]])
res1, res3, disp13 = procrustes(rand1, rand3)
res3_2, res1_2, disp31 = procrustes(rand3, rand1)
assert_almost_equal(disp13, disp31)
def test_procrustes_shape_mismatch(self):
assert_raises(ValueError, procrustes,
np.array([[1, 2], [3, 4]]),
np.array([[5, 6, 7], [8, 9, 10]]))
def test_procrustes_empty_rows_or_cols(self):
empty = np.array([[]])
assert_raises(ValueError, procrustes, empty, empty)
def test_procrustes_no_variation(self):
assert_raises(ValueError, procrustes,
np.array([[42, 42], [42, 42]]),
np.array([[45, 45], [45, 45]]))
def test_procrustes_bad_number_of_dimensions(self):
# fewer dimensions in one dataset
assert_raises(ValueError, procrustes,
np.array([1, 1, 2, 3, 5, 8]),
np.array([[1, 2], [3, 4]]))
# fewer dimensions in both datasets
assert_raises(ValueError, procrustes,
np.array([1, 1, 2, 3, 5, 8]),
np.array([1, 1, 2, 3, 5, 8]))
# zero dimensions
assert_raises(ValueError, procrustes, np.array(7), np.array(11))
# extra dimensions
assert_raises(ValueError, procrustes,
np.array([[[11], [7]]]),
np.array([[[5, 13]]]))
| {
"repo_name": "apbard/scipy",
"path": "scipy/spatial/tests/test__procrustes.py",
"copies": "1",
"size": "5049",
"license": "bsd-3-clause",
"hash": -6663346325361193000,
"line_mean": 41.7881355932,
"line_max": 79,
"alpha_frac": 0.5066349772,
"autogenerated": false,
"ratio": 3.249034749034749,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.42556697262347487,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import numpy as np
from numpy.testing import (TestCase, run_module_suite, assert_allclose,
assert_equal, assert_almost_equal, assert_raises)
from scipy.spatial import procrustes
class ProcrustesTests(TestCase):
def setUp(self):
"""creates inputs"""
# an L
self.data1 = np.array([[1, 3], [1, 2], [1, 1], [2, 1]], 'd')
# a larger, shifted, mirrored L
self.data2 = np.array([[4, -2], [4, -4], [4, -6], [2, -6]], 'd')
# an L shifted up 1, right 1, and with point 4 shifted an extra .5
# to the right
# pointwise distance disparity with data1: 3*(2) + (1 + 1.5^2)
self.data3 = np.array([[2, 4], [2, 3], [2, 2], [3, 2.5]], 'd')
# data4, data5 are standardized (trace(A*A') = 1).
# procrustes should return an identical copy if they are used
# as the first matrix argument.
shiftangle = np.pi / 8
self.data4 = np.array([[1, 0], [0, 1], [-1, 0],
[0, -1]], 'd') / np.sqrt(4)
self.data5 = np.array([[np.cos(shiftangle), np.sin(shiftangle)],
[np.cos(np.pi / 2 - shiftangle),
np.sin(np.pi / 2 - shiftangle)],
[-np.cos(shiftangle),
-np.sin(shiftangle)],
[-np.cos(np.pi / 2 - shiftangle),
-np.sin(np.pi / 2 - shiftangle)]],
'd') / np.sqrt(4)
def test_procrustes(self):
# tests procrustes' ability to match two matrices.
#
# the second matrix is a rotated, shifted, scaled, and mirrored version
# of the first, in two dimensions only
#
# can shift, mirror, and scale an 'L'?
a, b, disparity = procrustes(self.data1, self.data2)
assert_allclose(b, a)
assert_almost_equal(disparity, 0.)
# if first mtx is standardized, leaves first mtx unchanged?
m4, m5, disp45 = procrustes(self.data4, self.data5)
assert_equal(m4, self.data4)
# at worst, data3 is an 'L' with one point off by .5
m1, m3, disp13 = procrustes(self.data1, self.data3)
# self.assertTrue(disp13 < 0.5 ** 2)
def test_procrustes2(self):
# procrustes disparity should not depend on order of matrices
m1, m3, disp13 = procrustes(self.data1, self.data3)
m3_2, m1_2, disp31 = procrustes(self.data3, self.data1)
assert_almost_equal(disp13, disp31)
# try with 3d, 8 pts per
rand1 = np.array([[2.61955202, 0.30522265, 0.55515826],
[0.41124708, -0.03966978, -0.31854548],
[0.91910318, 1.39451809, -0.15295084],
[2.00452023, 0.50150048, 0.29485268],
[0.09453595, 0.67528885, 0.03283872],
[0.07015232, 2.18892599, -1.67266852],
[0.65029688, 1.60551637, 0.80013549],
[-0.6607528, 0.53644208, 0.17033891]])
rand3 = np.array([[0.0809969, 0.09731461, -0.173442],
[-1.84888465, -0.92589646, -1.29335743],
[0.67031855, -1.35957463, 0.41938621],
[0.73967209, -0.20230757, 0.52418027],
[0.17752796, 0.09065607, 0.29827466],
[0.47999368, -0.88455717, -0.57547934],
[-0.11486344, -0.12608506, -0.3395779],
[-0.86106154, -0.28687488, 0.9644429]])
res1, res3, disp13 = procrustes(rand1, rand3)
res3_2, res1_2, disp31 = procrustes(rand3, rand1)
assert_almost_equal(disp13, disp31)
def test_procrustes_shape_mismatch(self):
assert_raises(ValueError, procrustes,
np.array([[1, 2], [3, 4]]),
np.array([[5, 6, 7], [8, 9, 10]]))
def test_procrustes_empty_rows_or_cols(self):
empty = np.array([[]])
assert_raises(ValueError, procrustes, empty, empty)
def test_procrustes_no_variation(self):
assert_raises(ValueError, procrustes,
np.array([[42, 42], [42, 42]]),
np.array([[45, 45], [45, 45]]))
def test_procrustes_bad_number_of_dimensions(self):
# fewer dimensions in one dataset
assert_raises(ValueError, procrustes,
np.array([1, 1, 2, 3, 5, 8]),
np.array([[1, 2], [3, 4]]))
# fewer dimensions in both datasets
assert_raises(ValueError, procrustes,
np.array([1, 1, 2, 3, 5, 8]),
np.array([1, 1, 2, 3, 5, 8]))
# zero dimensions
assert_raises(ValueError, procrustes, np.array(7), np.array(11))
# extra dimensions
assert_raises(ValueError, procrustes,
np.array([[[11], [7]]]),
np.array([[[5, 13]]]))
if __name__ == '__main__':
run_module_suite()
| {
"repo_name": "DailyActie/Surrogate-Model",
"path": "01-codes/scipy-master/scipy/spatial/tests/test__procrustes.py",
"copies": "1",
"size": "5153",
"license": "mit",
"hash": -4007413244538559000,
"line_mean": 41.9416666667,
"line_max": 79,
"alpha_frac": 0.5063070056,
"autogenerated": false,
"ratio": 3.2655259822560203,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.427183298785602,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import numpy as np
from pandas import DataFrame
import numpy as np
from odo import resource, into
from datashape.predicates import isscalar, iscollection, isrecord
from blaze.expr import symbol, by
from blaze.interactive import Data
from blaze.compute import compute
from blaze.expr.functions import sin, exp
sources = []
t = symbol('t', 'var * {amount: int64, id: int64, name: string}')
L = [[ 100, 1, 'Alice'],
[ 200, 2, 'Bob'],
[ 300, 3, 'Charlie'],
[-400, 4, 'Dan'],
[ 500, 5, 'Edith']]
df = DataFrame(L, columns=['amount', 'id', 'name'])
x = into(np.ndarray, df)
sources = [df, x]
try:
import sqlalchemy
sql = resource('sqlite:///:memory:::accounts', dshape=t.dshape)
into(sql, L)
sources.append(sql)
except Exception:
sql = None
try:
import bcolz
bc = into(bcolz.ctable, df)
sources.append(bc)
except ImportError:
bc = None
try:
import pymongo
except ImportError:
pymongo = mongo = None
if pymongo:
from blaze.mongo import *
try:
db = pymongo.MongoClient().db
db._test_comprehensive.drop()
mongo = into(db._test_comprehensive, df)
sources.append(mongo)
except pymongo.errors.ConnectionFailure:
mongo = None
# {expr: [list-of-exclusions]}
expressions = {
t: [],
t['id']: [],
abs(t['amount']): [],
t.id.max(): [],
t.amount.sum(): [],
t.amount.sum(keepdims=True): [],
t.amount.count(keepdims=True): [],
t.amount.nunique(keepdims=True): [mongo],
t.amount.nunique(): [],
t.amount.head(): [],
t.amount + 1: [mongo],
sin(t.amount): [sql, mongo], # sqlite doesn't support trig
exp(t.amount): [sql, mongo],
t.amount > 50: [mongo],
t[t.amount > 50]: [],
t.like(name='Alic*'): [],
t.sort('name'): [bc],
t.sort('name', ascending=False): [bc],
t.head(3): [],
t.name.distinct(): [],
t[t.amount > 50]['name']: [], # odd ordering issue
t.id.map(lambda x: x + 1, schema='int64', name='id'): [sql, mongo],
t[t.amount > 50]['name']: [],
by(t.name, total=t.amount.sum()): [],
by(t.id, count=t.id.count()): [],
by(t[['id', 'amount']], count=t.id.count()): [],
by(t[['id', 'amount']], total=(t.amount + 1).sum()): [mongo],
by(t[['id', 'amount']], n=t.name.nunique()): [mongo, bc],
by(t.id, count=t.amount.count()): [],
by(t.id, n=t.id.nunique()): [mongo, bc],
# by(t, count=t.count()): [],
# by(t.id, count=t.count()): [],
t[['amount', 'id']]: [x], # https://github.com/numpy/numpy/issues/3256
t[['id', 'amount']]: [x, bc], # bcolz sorting
t[0]: [sql, mongo, bc],
t[::2]: [sql, mongo, bc],
t.id.utcfromtimestamp: [sql],
t.distinct().nrows: [],
t.nelements(axis=0): [],
t.nelements(axis=None): [],
t.amount.truncate(200): [sql]
}
base = df
def df_eq(a, b):
return (list(a.columns) == list(b.columns)
# and list(a.dtypes) == list(b.dtypes)
and into(set, into(list, a)) == into(set, into(list, b)))
def typename(obj):
return type(obj).__name__
def test_base():
for expr, exclusions in expressions.items():
if iscollection(expr.dshape):
model = into(DataFrame, into(np.ndarray, expr._subs({t: Data(base, t.dshape)})))
else:
model = compute(expr._subs({t: Data(base, t.dshape)}))
print('\nexpr: %s\n' % expr)
for source in sources:
if id(source) in map(id, exclusions):
continue
print('%s <- %s' % (typename(model), typename(source)))
T = Data(source)
if iscollection(expr.dshape):
result = into(type(model), expr._subs({t: T}))
if isscalar(expr.dshape.measure):
assert set(into(list, result)) == set(into(list, model))
else:
assert df_eq(result, model)
elif isrecord(expr.dshape):
result = compute(expr._subs({t: T}))
assert into(tuple, result) == into(tuple, model)
else:
result = compute(expr._subs({t: T}))
assert result == model
| {
"repo_name": "mrocklin/blaze",
"path": "blaze/compute/tests/test_comprehensive.py",
"copies": "1",
"size": "4373",
"license": "bsd-3-clause",
"hash": 8019634906921651000,
"line_mean": 30.0141843972,
"line_max": 92,
"alpha_frac": 0.5314429453,
"autogenerated": false,
"ratio": 3.3003773584905662,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4331820303790566,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import numpy as np
from pandas import DataFrame
import numpy as np
import bcolz
from datashape.predicates import isscalar, iscollection, isrecord
from blaze.expr import Symbol, by
from blaze.api import Data, into
from blaze.compute import compute
from blaze.expr.functions import sin, exp
from blaze.sql import SQL
sources = []
t = Symbol('t', 'var * {amount: int64, id: int64, name: string}')
L = [[100, 1, 'Alice'],
[200, 2, 'Bob'],
[300, 3, 'Charlie'],
[400, 4, 'Dan'],
[500, 5, 'Edith']]
df = DataFrame(L, columns=['amount', 'id', 'name'])
x = into(np.ndarray, df)
bc = into(bcolz.ctable, df)
sql = SQL('sqlite:///:memory:', 'accounts', schema=t.schema)
sql.extend(L)
sources = [df, x, bc, sql]
try:
import pymongo
except ImportError:
pymongo = mongo = None
if pymongo:
from blaze.mongo import *
try:
db = pymongo.MongoClient().db
db._test_comprehensive.drop()
mongo = into(db._test_comprehensive, df)
sources.append(mongo)
except pymongo.errors.ConnectionFailure:
mongo = None
# {expr: [list-of-exclusions]}
expressions = {
t: [],
t['id']: [],
t.id.max(): [],
t.amount.sum(): [],
t.amount.head(): [],
t.amount + 1: [mongo],
sin(t.amount): [sql, mongo], # sqlite doesn't support trig
exp(t.amount): [sql, mongo],
t.amount > 50: [mongo],
t[t.amount > 50]: [],
t.like(name='Alic*'): [],
t.sort('name'): [bc],
t.sort('name', ascending=False): [bc],
t.head(3): [],
t.name.distinct(): [],
t[t.amount > 50]['name']: [], # odd ordering issue
t.id.map(lambda x: x + 1, schema='int', name='id'): [sql, mongo],
t[t.amount > 50]['name']: [],
by(t.name, t.amount.sum()): [],
by(t.id, t.id.count()): [],
by(t[['id', 'amount']], t.id.count()): [],
by(t[['id', 'amount']], (t.amount + 1).sum()): [mongo],
by(t[['id', 'amount']], t.name.nunique()): [mongo],
by(t.id, t.amount.count()): [],
by(t.id, t.id.nunique()): [mongo],
# by(t, t.count()): [],
# by(t.id, t.count()): [df],
t[['amount', 'id']]: [x], # https://github.com/numpy/numpy/issues/3256
t[['id', 'amount']]: [x, bc], # bcolz sorting
t[0]: [sql, mongo],
t[::2]: [sql, mongo],
}
base = df
def df_eq(a, b):
return (list(a.columns) == list(b.columns)
and list(a.dtypes) == list(b.dtypes)
and into(set, into(list, a)) == into(set, into(list, b)))
def typename(obj):
return type(obj).__name__
def test_base():
for expr, exclusions in expressions.items():
model = compute(expr._subs({t: Data(base, t.dshape)}))
print('\nexpr: %s\n' % expr)
for source in sources:
if id(source) in map(id, exclusions):
continue
print('%s <- %s' % (typename(model), typename(source)))
T = Data(source)
if iscollection(expr.dshape):
result = into(model, expr._subs({t: T}))
if isscalar(expr.dshape.measure):
assert set(into([], result)) == set(into([], model))
else:
assert df_eq(result, model)
elif isrecord(expr.dshape):
result = compute(expr._subs({t: T}))
assert into(tuple, result) == into(tuple, model)
else:
result = compute(expr._subs({t: T}))
assert result == model
| {
"repo_name": "vitan/blaze",
"path": "blaze/compute/tests/test_comprehensive.py",
"copies": "1",
"size": "3618",
"license": "bsd-3-clause",
"hash": -1023266594592036500,
"line_mean": 29.6610169492,
"line_max": 78,
"alpha_frac": 0.5243228303,
"autogenerated": false,
"ratio": 3.277173913043478,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4301496743343478,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import numpy as np
from pandas import DataFrame, Series
from ..expr import Reduction, Field, Projection, Broadcast, Selection
from ..expr import Distinct, Sort, Head, Label, ReLabel, Union, Expr, Slice
from ..expr import std, var, count, nunique
from ..expr import BinOp, UnaryOp, USub, Not
from .core import base, compute
from ..dispatch import dispatch
from ..api.into import into
import pandas as pd
__all__ = ['np']
@dispatch(Field, np.ndarray)
def compute_up(c, x, **kwargs):
if x.dtype.names and c._name in x.dtype.names:
return x[c._name]
if not x.dtype.names and x.shape[1] == len(c._child.fields):
return x[:, c._child.fields.index(c._name)]
raise NotImplementedError() # pragma: no cover
@dispatch(Projection, np.ndarray)
def compute_up(t, x, **kwargs):
if x.dtype.names and all(col in x.dtype.names for col in t.fields):
return x[t.fields]
if not x.dtype.names and x.shape[1] == len(t._child.fields):
return x[:, [t._child.fields.index(col) for col in t.fields]]
raise NotImplementedError() # pragma: no cover
@dispatch(Broadcast, np.ndarray)
def compute_up(t, x, **kwargs):
d = dict((t._child[c]._expr, x[c]) for c in t._child.fields)
return compute(t._expr, d)
@dispatch(BinOp, np.ndarray, (np.ndarray, base))
def compute_up(t, lhs, rhs, **kwargs):
return t.op(lhs, rhs)
@dispatch(BinOp, base, np.ndarray)
def compute_up(t, lhs, rhs, **kwargs):
return t.op(lhs, rhs)
@dispatch(UnaryOp, np.ndarray)
def compute_up(t, x, **kwargs):
return getattr(np, t.symbol)(x)
@dispatch(Not, np.ndarray)
def compute_up(t, x, **kwargs):
return ~x
@dispatch(USub, np.ndarray)
def compute_up(t, x, **kwargs):
return -x
@dispatch(count, np.ndarray)
def compute_up(t, x, **kwargs):
return pd.notnull(x).sum()
@dispatch(nunique, np.ndarray)
def compute_up(t, x, **kwargs):
return len(np.unique(x))
@dispatch(Reduction, np.ndarray)
def compute_up(t, x, **kwargs):
return getattr(x, t.symbol)(axis=t.axis, keepdims=t.keepdims)
@dispatch((std, var), np.ndarray)
def compute_up(t, x, **kwargs):
return getattr(x, t.symbol)(ddof=t.unbiased)
@dispatch(Distinct, np.ndarray)
def compute_up(t, x, **kwargs):
return np.unique(x)
@dispatch(Sort, np.ndarray)
def compute_up(t, x, **kwargs):
if (t.key in x.dtype.names or
isinstance(t.key, list) and all(k in x.dtype.names for k in t.key)):
result = np.sort(x, order=t.key)
elif t.key:
raise NotImplementedError("Sort key %s not supported" % str(t.key))
else:
result = np.sort(x)
if not t.ascending:
result = result[::-1]
return result
@dispatch(Head, np.ndarray)
def compute_up(t, x, **kwargs):
return x[:t.n]
@dispatch(Label, np.ndarray)
def compute_up(t, x, **kwargs):
return np.array(x, dtype=[(t.label, x.dtype.type)])
@dispatch(ReLabel, np.ndarray)
def compute_up(t, x, **kwargs):
types = [x.dtype[i] for i in range(len(x.dtype))]
return np.array(x, dtype=list(zip(t.fields, types)))
@dispatch(Selection, np.ndarray)
def compute_up(sel, x, **kwargs):
return x[compute(sel.predicate, {sel._child: x})]
@dispatch(Union, np.ndarray, tuple)
def compute_up(expr, example, children, **kwargs):
return np.concatenate(list(children), axis=0)
@dispatch(Slice, np.ndarray)
def compute_up(expr, x, **kwargs):
return x[expr.index]
@dispatch(Expr, np.ndarray)
def compute_up(t, x, **kwargs):
if x.ndim > 1 or isinstance(x, np.recarray) or x.dtype.fields is not None:
df = DataFrame(columns=t._child.fields)
else:
df = Series(name=t._child.fields[0])
return compute_up(t, into(df, x), **kwargs)
@dispatch(np.ndarray)
def chunks(x, chunksize=1024):
start = 0
n = len(x)
while start < n:
yield x[start:start + chunksize]
start += chunksize
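# Hedged usage sketch (editor's addition, assumes the blaze package is importable
# so the dispatched signature above resolves): chunks() lazily yields successive
# fixed-size slices of an ndarray.
if __name__ == "__main__":
    x = np.arange(10)
    assert [c.tolist() for c in chunks(x, chunksize=4)] == [[0, 1, 2, 3], [4, 5, 6, 7], [8, 9]]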
| {
"repo_name": "vitan/blaze",
"path": "blaze/compute/numpy.py",
"copies": "1",
"size": "3923",
"license": "bsd-3-clause",
"hash": 193463488919735650,
"line_mean": 24.3096774194,
"line_max": 78,
"alpha_frac": 0.6543461637,
"autogenerated": false,
"ratio": 2.978739559605163,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4133085723305163,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import numpy as np
from qtpy import QtGui
from glue.core import roi as roimod
__all__ = ['cmap2pixmap', 'ginga_graphic_to_roi']
def cmap2pixmap(cmap, steps=50):
"""Convert a Ginga colormap into a QtGui.QPixmap
:param cmap: The colormap to use
:type cmap: Ginga colormap instance (e.g. ginga.cmap.get_cmap('gray'))
:param steps: The number of color steps in the output. Default=50
:type steps: int
:rtype: QtGui.QPixmap
"""
inds = np.linspace(0, 1, steps)
n = len(cmap.clst) - 1
tups = [cmap.clst[int(x * n)] for x in inds]
rgbas = [QtGui.QColor(int(r * 255), int(g * 255),
int(b * 255), 255).rgba() for r, g, b in tups]
im = QtGui.QImage(steps, 1, QtGui.QImage.Format_Indexed8)
im.setColorTable(rgbas)
for i in range(steps):
im.setPixel(i, 0, i)
im = im.scaled(128, 32)
pm = QtGui.QPixmap.fromImage(im)
return pm
def ginga_graphic_to_roi(obj):
if obj.kind == 'rectangle':
roi = roimod.RectangularROI(xmin=obj.x1, xmax=obj.x2,
ymin=obj.y1, ymax=obj.y2)
elif obj.kind == 'circle':
roi = roimod.CircularROI(xc=obj.x, yc=obj.y,
radius=obj.radius)
elif obj.kind == 'polygon':
# use list comprehensions so this also works on Python 3, where map() is lazy
vx = [xy[0] for xy in obj.points]
vy = [xy[1] for xy in obj.points]
roi = roimod.PolygonalROI(vx=vx, vy=vy)
else:
raise Exception("Don't know how to convert shape '%s' to a ROI" % (
obj.kind))
return roi
| {
"repo_name": "saimn/glue",
"path": "glue/plugins/ginga_viewer/qt/utils.py",
"copies": "1",
"size": "1611",
"license": "bsd-3-clause",
"hash": -8977182342137021000,
"line_mean": 31.22,
"line_max": 75,
"alpha_frac": 0.5890751086,
"autogenerated": false,
"ratio": 3.0685714285714285,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9157646537171429,
"avg_score": 0,
"num_lines": 50
} |
from __future__ import absolute_import, division, print_function
import numpy as np
from scipy.ndimage import gaussian_filter
from glue.core.data import Subset
from glue.core.exceptions import IncompatibleAttribute
from .layer_state import IsosurfaceLayerState
from ..common.layer_artist import VispyLayerArtist
from ..extern.vispy.color import BaseColormap
from .multi_iso_visual import MultiIsoVisual
DATA_PROPERTIES = set(['attribute', 'level_low', 'level_high'])
LEVEL_PROPERTIES = set(['step_value'])
COLOR_PROPERTIES = set(['color', 'alpha', 'cmap'])
STEP_PROPERTIES = set(['step'])
# TODO: create prettier colormaps
class TransFire(BaseColormap):
glsl_map = """
vec4 translucent_grays(int l){
if (l==1)
{return $color_0;}
if (l==2)
{return $color_1;}
if (l==3)
{return $color_2;}
if (l==4)
{return $color_3;}
if (l==5)
{return $color_4;}
if (l==6)
{return $color_5;}
if (l==7)
{return $color_6;}
if (l==8)
{return $color_7;}
if (l==9)
{return $color_8;}
if (l==10)
{return $color_9;}
}
"""
# class AutoCmap(BaseColormap):
# colors =
# glsl_map = """
# vec4 translucent_grays(int l){
#
# }
# """
# vec4 translucent_fire(float t) {
# return vec4(pow(t, 0.5), t, t*t, max(0, t*1.05 - 0.05));
# }
class IsosurfaceLayerArtist(VispyLayerArtist):
"""
A layer artist to render isosurfaces.
"""
def __init__(self, vispy_viewer, layer=None, layer_state=None):
super(IsosurfaceLayerArtist, self).__init__(layer)
self._clip_limits = None
self.layer = layer or layer_state.layer
self.vispy_widget = vispy_viewer._vispy_widget
# TODO: need to remove layers when layer artist is removed
self._viewer_state = vispy_viewer.state
self.state = layer_state or IsosurfaceLayerState(layer=self.layer)
if self.state not in self._viewer_state.layers:
self._viewer_state.layers.append(self.state)
# self._iso_visual = scene.Isosurface(np.ones((3, 3, 3)), level=0.5, shading='smooth')
# Create isosurface visual
self._iso_visual = MultiIsoVisual(np.ones((3, 3, 3)), step=4, relative_step_size=0.5)
# relative_step_size affects ray-casting performance; 0.5-1.5 is recommended
self.vispy_widget.add_data_visual(self._iso_visual)
self._viewer_state.add_global_callback(self._update_volume)
self.state.add_global_callback(self._update_volume)
self.reset_cache()
def reset_cache(self):
self._last_viewer_state = {}
self._last_layer_state = {}
@property
def bbox(self):
return (-0.5, self.layer.shape[2] - 0.5,
-0.5, self.layer.shape[1] - 0.5,
-0.5, self.layer.shape[0] - 0.5)
def redraw(self):
"""
Redraw the Vispy canvas
"""
self.vispy_widget.canvas.update()
def clear(self):
"""
Remove the layer artist from the visualization
"""
self._iso_visual.parent = None
def _update_level(self):
# TODO: set iso clim
# self._iso_visual.set_data()
pass
def _update_step(self):
# TODO: generate a new color and transparency scheme based on step num
self._iso_visual.step = self.state.step
self.redraw()
# self._update_color()
def _update_color(self):
cmap_data = self.state.cmap(np.linspace(0, 1, 10).tolist()) # self.cmap returns 10 colors
cmap_data = cmap_data.tolist()
t = TransFire(colors=cmap_data)
self._iso_visual.cmap = t
self.redraw()
def _update_data(self):
if isinstance(self.layer, Subset):
try:
mask = self.layer.to_mask()
except IncompatibleAttribute:
mask = np.zeros(self.layer.data.shape, dtype=bool)
data = mask.astype(float)
else:
data = self.layer[self.state.attribute]
if self._clip_limits is not None:
xmin, xmax, ymin, ymax, zmin, zmax = self._clip_limits
imin, imax = int(np.ceil(xmin)), int(np.ceil(xmax))
jmin, jmax = int(np.ceil(ymin)), int(np.ceil(ymax))
kmin, kmax = int(np.ceil(zmin)), int(np.ceil(zmax))
invalid = -np.inf
data = data.copy()
data[:, :, :imin] = invalid
data[:, :, imax:] = invalid
data[:, :jmin] = invalid
data[:, jmax:] = invalid
data[:kmin] = invalid
data[kmax:] = invalid
# self._iso_visual.set_data(np.nan_to_num(data).transpose())
gaussian_data = gaussian_filter(data/4, 1)
# TODO: the clim here conflicts with set levels
# self._iso_visual.set_data(
# np.nan_to_num(gaussian_data),
# clim=(self.level_low, self.level_high))
# self._iso_visual.step = self.step
self._iso_visual.set_data(np.nan_to_num(gaussian_data))
self.redraw()
def _update_visibility(self):
# if self.visible:
# self._iso_visual.parent =
# else:
# self._multivol.disable(self.id)
self.redraw()
def set_clip(self, limits):
self._clip_limits = limits
self._update_data()
def _update_volume(self, force=False, **kwargs):
if self.state.attribute is None or self.state.layer is None:
return
# Figure out which attributes are different from before. Ideally we shouldn't
# need this but currently this method is called multiple times if an
# attribute is changed due to x_att changing then hist_x_min, hist_x_max, etc.
# If we can solve this so that _update_histogram is really only called once
# then we could consider simplifying this. Until then, we manually keep track
# of which properties have changed.
changed = set()
if not force:
for key, value in self._viewer_state.as_dict().items():
if value != self._last_viewer_state.get(key, None):
changed.add(key)
for key, value in self.state.as_dict().items():
if value != self._last_layer_state.get(key, None):
changed.add(key)
self._last_viewer_state.update(self._viewer_state.as_dict())
self._last_layer_state.update(self.state.as_dict())
if force or len(changed & DATA_PROPERTIES) > 0:
self._update_data()
if force or len(changed & LEVEL_PROPERTIES) > 0:
self._update_level()
if force or len(changed & COLOR_PROPERTIES) > 0:
self._update_color()
if force or len(changed & STEP_PROPERTIES) > 0:
self._update_step()
def update(self):
self._update_volume(force=True)
self.redraw()
| {
"repo_name": "astrofrog/glue-vispy-viewers",
"path": "glue_vispy_viewers/isosurface/layer_artist.py",
"copies": "2",
"size": "6925",
"license": "bsd-2-clause",
"hash": -837508147351221500,
"line_mean": 30.334841629,
"line_max": 98,
"alpha_frac": 0.5810830325,
"autogenerated": false,
"ratio": 3.4555888223552893,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00035258123300741044,
"num_lines": 221
} |
from __future__ import absolute_import, division, print_function
import numpy as np
from scipy.sparse import coo_matrix
from pymor.core.interfaces import inject_sid
from pymor.discretizations import StationaryDiscretization
from pymor.domaindescriptions import TorusDomain, CircleDomain
from pymor.domaindiscretizers import discretize_domain_default
from pymor.functions import ConstantFunction
from pymor.grids import TriaGrid
from pymor.la import NumpyVectorSpace, NumpyVectorArray
from pymor.operators.basic import NumpyMatrixBasedOperator
from pymor.operators.constructions import (LincombOperator, IdentityOperator, Concatenation,
VectorOperator, ConstantOperator)
from pymor.operators.cg import DiffusionOperatorP1, L2ProductFunctionalP1
from pymor.reductors.stationary import reduce_stationary_coercive
from pymor.gui.qt import PatchVisualizer, Matplotlib1DVisualizer
class ZeroMeanStationaryDiscretization(StationaryDiscretization):
def __init__(self, operator, rhs, dirichlet_operator, dirichlet_rhs, mean_value_corrector,
products=None, functionals=None, parameter_space=None, estimator=None, visualizer=None,
cache_region='disk', name=None):
super(ZeroMeanStationaryDiscretization, self).__init__(operator, rhs, products=products,
functionals=functionals, parameter_space=parameter_space,
estimator=estimator, visualizer=visualizer,
cache_region=cache_region, name=name)
self.dirichlet_operator = dirichlet_operator
self.dirichlet_rhs = dirichlet_rhs
self.mean_value_corrector = mean_value_corrector
def _solve(self, mu=None):
mu = self.parse_parameter(mu)
self.logger.info('Solving {} for {} ...'.format(self.name, mu))
U = self.dirichlet_operator.apply_inverse(self.rhs.as_vector(mu=mu), mu=mu)
return self.mean_value_corrector.apply(U)
def reduce_zero_mean_value_stationary(discretization, RB, coercivity_estimator=None, extends=None):
functionals = dict(discretization.functionals)
d = StationaryDiscretization(discretization.operator, discretization.rhs,
products=discretization.products, functionals=functionals,
parameter_space=discretization.parameter_space, cache_region=None,
name=discretization.name)
return reduce_stationary_coercive(d, RB, coercivity_estimator=coercivity_estimator, extends=extends)
class CellProblemRHSOperator(NumpyMatrixBasedOperator):
sparse = False
range = NumpyVectorSpace(1)
def __init__(self, grid, diffusion_function, dim_ind, name=None):
self.source = NumpyVectorSpace(grid.size(grid.dim))
self.grid = grid
self.diffusion_function = diffusion_function
self.dim_ind = dim_ind
self.name = name
self.build_parameter_type(inherits=(diffusion_function,))
def _assemble(self, mu=None):
g = self.grid
F = - self.diffusion_function(g.centers(0), mu=mu)
EI = np.zeros(g.dim)
EI[self.dim_ind] = 1.
if g.dim == 2:
SF_GRAD = np.array(([-1., -1.],
[1., 0.],
[0., 1.]))
elif g.dim == 1:
SF_GRAD = np.array(([-1.],
[1., ]))
else:
raise NotImplementedError
SF_GRADS = np.einsum('eij,pj->epi', g.jacobian_inverse_transposed(0), SF_GRAD)
# integrate the products of the function with the shape functions on each element
# -> shape = (g.size(0), number of shape functions)
SF_INTS = np.einsum('e,epi,i,e->ep', F, SF_GRADS, EI, g.volumes(0)).ravel()
# map local DOFs to global DOFs
SF_I = g.subentities(0, g.dim).ravel()
I = np.array(coo_matrix((SF_INTS, (np.zeros_like(SF_I), SF_I)), shape=(1, g.size(g.dim))).todense()).ravel()
return I.reshape((1, -1))
class DirichletOperator(NumpyMatrixBasedOperator):
def __init__(self, operator):
self.operator = operator
self.source = operator.source
self.range = operator.range
self.build_parameter_type(inherits=(operator,))
def _assemble(self, mu=None):
matrix = self.operator.assemble(mu)._matrix.tolil()
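# Editor's note (interpretation, not from the original source): the periodic cell
# problem is only determined up to an additive constant, so the first row is
# replaced by a row of the identity to pin one DOF and make the system uniquely
# solvable; the mean-value corrector later restores the zero-mean representative.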
matrix[0] = 0.
matrix[0, 0] = 1.
return matrix.tocsc()
class DirichletFunctional(NumpyMatrixBasedOperator):
def __init__(self, operator):
self.operator = operator
self.source = operator.source
self.range = operator.range
self.build_parameter_type(inherits=(operator,))
def _assemble(self, mu=None):
matrix = self.operator.assemble(mu)._matrix.copy()
matrix[0, 0] = 0.
return matrix
def discretize_cell_problems(diffusion_functions, diffusion_functionals, diameter=1. / 100.):
dim = diffusion_functions[0].dim_domain
assert dim in (1, 2)
assert all(f.dim_domain == dim and f.shape_range == tuple() for f in diffusion_functions)
if dim == 1:
domain = CircleDomain([0., 1.])
grid, boundary_info = discretize_domain_default(domain, diameter=diameter)
visualizer = Matplotlib1DVisualizer(grid=grid, codim=1)
else:
domain = TorusDomain(([0., 0.], [1., 1.]))
grid, boundary_info = discretize_domain_default(domain, diameter=diameter, grid_type=TriaGrid)
visualizer = PatchVisualizer(grid=grid, bounding_box=grid.domain, codim=2)
operators = [DiffusionOperatorP1(grid, boundary_info, diffusion_function=f, name='diffusion_{}'.format(i))
for i, f in enumerate(diffusion_functions)]
operator = LincombOperator(operators, diffusion_functionals)
dirichlet_operator = DirichletOperator(operator)
mean_value_functional = L2ProductFunctionalP1(grid, ConstantFunction(1., dim_domain=dim), order=1,
name='mean_value_functional')
constant_projection = Concatenation(VectorOperator(NumpyVectorArray(np.ones(grid.size(dim))), copy=False),
mean_value_functional)
mean_value_corrector = IdentityOperator(constant_projection.source) - constant_projection
mean_value_corrector.unlock()
inject_sid(mean_value_corrector, 'cell_problem_mean_value_corrector', grid)
ones = NumpyVectorArray(np.ones(grid.size(dim)))
def make_diffusion_integral(f):
op = ConstantOperator(L2ProductFunctionalP1(grid, f, order=1).apply(ones), source=operator.source)
op.unlock()
inject_sid(op, 'cell_problem_diffusion_integral', f, grid)
return op
diffusion_integrals = [make_diffusion_integral(f) for f in diffusion_functions]
diffusion_integral = LincombOperator(diffusion_integrals, diffusion_functionals)
rhss = []
for dim_ind in range(dim):
components = [CellProblemRHSOperator(grid, diffusion_function=f, dim_ind=dim_ind,
name='RHS_Functional_{}_{}'.format(dim_ind, i))
for i, f in enumerate(diffusion_functions)]
rhss.append(LincombOperator(components, diffusion_functionals))
discretizations = []
for dim_ind in range(dim):
rhs = rhss[dim_ind]
dirichlet_rhs = DirichletFunctional(rhs)
homogenized_diffusions = [(diffusion_integral - rhss[i] if i == dim_ind else rhss[i] * (-1.))
for i in range(dim)]
d = ZeroMeanStationaryDiscretization(operator, rhs, dirichlet_operator, dirichlet_rhs,
mean_value_corrector,
functionals={('diffusion', i): f
for i, f in enumerate(homogenized_diffusions)},
visualizer=visualizer, name='CellProblem_{}'.format(dim_ind))
discretizations.append(d)
return discretizations, {'grid': grid, 'boundary_info': boundary_info}
| {
"repo_name": "sdrave/mfo-tutorial",
"path": "cellproblems.py",
"copies": "1",
"size": "8251",
"license": "bsd-2-clause",
"hash": -2903402919569150500,
"line_mean": 45.3539325843,
"line_max": 120,
"alpha_frac": 0.6299842443,
"autogenerated": false,
"ratio": 3.8163737280296024,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9936246695724276,
"avg_score": 0.00202225532106539,
"num_lines": 178
} |
from __future__ import absolute_import, division, print_function
import numpy as np
from six import PY3
from six.moves import range
from .hashfunctions import hash64
if PY3:
long = int
class HyperLogLog(object):
""" Basic Hyperloglog """
def __init__(self, error_rate):
b = int(np.ceil(np.log2((1.04 / error_rate) ** 2)))
self.precision = 64
self.alpha = self._get_alpha(b)
self.b = b
self.m = 1 << b
self.M = np.zeros(self.m, dtype=np.uint8)
self.bitcount_arr = [long(1) << i for i in range(self.precision - b + 1)]
@staticmethod
def _get_alpha(b):
if not (4 <= b <= 16):
raise ValueError("b=%d should be in range [4 : 16]" % b)
if b == 4:
return 0.673
if b == 5:
return 0.697
if b == 6:
return 0.709
return 0.7213 / (1.0 + 1.079 / (1 << b))
def _get_rho(self, w, arr):
""" Return the least signifiant bit
O(N) in the worst case
"""
lsb = 0
while not (w & arr[lsb]):
lsb += 1
return lsb + 1
def add(self, uuid):
""" Adds a key to the HyperLogLog """
if uuid:
# Computing the hash
try:
x = hash64(uuid)
except UnicodeEncodeError:
x = hash64(uuid.encode('ascii', 'ignore'))
# Finding the register to update by using the first b bits as an index
j = x & ((1 << self.b) - 1)
# Remove those b bits
w = x >> self.b
# Find the first 0 in the remaining bit pattern
self.M[j] = max(self.M[j], self._get_rho(w, self.bitcount_arr))
def __len__(self, M=None):
""" Returns the estimate of the cardinality """
return self.estimate()
def __or__(self, other_hll):
""" Perform a union with another HLL object. """
# register-wise maximum (reduce() is not a builtin on Python 3)
self.M = np.maximum(self.M, other_hll.M).astype(np.int16)
return self
def estimate(self):
""" Returns the estimate of the cardinality """
E = self.alpha * float(self.m ** 2) / np.power(2.0, - self.M).sum()
if E <= 2.5 * self.m: # Small range correction
V = self.m - np.count_nonzero(self.M)
return int(self.m * np.log(self.m / float(V))) if V > 0 else int(E)
# intermediate range correction -> no correction
elif E <= float(long(1) << self.precision) / 30.0:
return int(E)
else:
return int(-(long(1) << self.precision) *
np.log(1.0 - E / (long(1) << self.precision)))
if __name__ == "__main__":
hll = HyperLogLog(0.01)
for i in range(100000):
hll.add(str(i))
print(len(hll))
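# Hedged extension of the demo above (editor's addition): unioning two sketches
# via ``|`` estimates the cardinality of the combined key set.
if __name__ == "__main__":
    hll_a = HyperLogLog(0.01)
    hll_b = HyperLogLog(0.01)
    for i in range(50000):
        hll_a.add("a%d" % i)
        hll_b.add("b%d" % i)
    print(len(hll_a | hll_b))  # roughly 100000, within the configured error rate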
| {
"repo_name": "Parsely/probably",
"path": "probably/hll.py",
"copies": "1",
"size": "2837",
"license": "mit",
"hash": 5403677793959691000,
"line_mean": 30.5222222222,
"line_max": 82,
"alpha_frac": 0.508635883,
"autogenerated": false,
"ratio": 3.4809815950920244,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9488270627274118,
"avg_score": 0.0002693701635811539,
"num_lines": 90
} |
from __future__ import absolute_import, division, print_function
import numpy as np
from six.moves import xrange
from .common import Benchmark
class LaplaceInplace(Benchmark):
params = ['inplace', 'normal']
param_names = ['update']
def setup(self, update):
N = 150
Niter = 1000
dx = 0.1
dy = 0.1
dx2 = (dx * dx)
dy2 = (dy * dy)
def num_update(u, dx2, dy2):
u[1:(-1), 1:(-1)] = ((((u[2:, 1:(-1)] + u[:(-2), 1:(-1)]) * dy2) +
((u[1:(-1), 2:] + u[1:(-1), :(-2)]) * dx2))
/ (2 * (dx2 + dy2)))
def num_inplace(u, dx2, dy2):
tmp = u[:(-2), 1:(-1)].copy()
np.add(tmp, u[2:, 1:(-1)], out=tmp)
np.multiply(tmp, dy2, out=tmp)
tmp2 = u[1:(-1), 2:].copy()
np.add(tmp2, u[1:(-1), :(-2)], out=tmp2)
np.multiply(tmp2, dx2, out=tmp2)
np.add(tmp, tmp2, out=tmp)
np.multiply(tmp, (1.0 / (2.0 * (dx2 + dy2))),
out=u[1:(-1), 1:(-1)])
def laplace(N, Niter=100, func=num_update, args=()):
u = np.zeros([N, N], order='C')
u[0] = 1
for i in range(Niter):
func(u, *args)
return u
func = {'inplace': num_inplace, 'normal': num_update}[update]
def run():
laplace(N, Niter, func, args=(dx2, dy2))
self.run = run
def time_it(self, update):
self.run()
class MaxesOfDots(Benchmark):
def setup(self):
np.random.seed(1)
nsubj = 5
nfeat = 100
ntime = 200
self.arrays = [np.random.normal(size=(ntime, nfeat))
for i in xrange(nsubj)]
def maxes_of_dots(self, arrays):
"""
A magical feature score for each feature in each dataset
:ref:`Haxby et al., Neuron (2011) <HGC+11>`.
If arrays are column-wise zscore-d before computation it
results in characterizing each column in each array with
sum of maximal correlations of that column with columns
in other arrays.
Arrays must agree only on the first dimension.
For numpy this is a joint benchmark of dot products and max()
on a set of arrays.
"""
feature_scores = ([0] * len(arrays))
for (i, sd) in enumerate(arrays):
for (j, sd2) in enumerate(arrays[(i + 1):]):
corr_temp = np.dot(sd.T, sd2)
feature_scores[i] += np.max(corr_temp, axis=1)
feature_scores[((j + i) + 1)] += np.max(corr_temp, axis=0)
return feature_scores
def time_it(self):
self.maxes_of_dots(self.arrays)
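# Hedged standalone sketch (editor's addition, not part of the benchmark suite):
# checks on a tiny grid that the explicit-temporary, in-place Jacobi update used
# by LaplaceInplace matches the plain slicing formulation.
if __name__ == "__main__":
    dx2 = dy2 = 0.01
    u1 = np.zeros((6, 6))
    u1[0] = 1
    u2 = u1.copy()
    # plain formulation (one sweep)
    u1[1:-1, 1:-1] = (((u1[2:, 1:-1] + u1[:-2, 1:-1]) * dy2) +
                      ((u1[1:-1, 2:] + u1[1:-1, :-2]) * dx2)) / (2 * (dx2 + dy2))
    # in-place formulation with explicit temporaries (one sweep)
    tmp = u2[:-2, 1:-1].copy()
    np.add(tmp, u2[2:, 1:-1], out=tmp)
    np.multiply(tmp, dy2, out=tmp)
    tmp2 = u2[1:-1, 2:].copy()
    np.add(tmp2, u2[1:-1, :-2], out=tmp2)
    np.multiply(tmp2, dx2, out=tmp2)
    np.add(tmp, tmp2, out=tmp)
    np.multiply(tmp, 1.0 / (2.0 * (dx2 + dy2)), out=u2[1:-1, 1:-1])
    assert np.allclose(u1, u2)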
| {
"repo_name": "DailyActie/Surrogate-Model",
"path": "01-codes/numpy-master/benchmarks/benchmarks/bench_app.py",
"copies": "1",
"size": "2745",
"license": "mit",
"hash": -2245241569779055400,
"line_mean": 30.1931818182,
"line_max": 78,
"alpha_frac": 0.4907103825,
"autogenerated": false,
"ratio": 3.3152173913043477,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4305927773804348,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import numpy as np
from tensorflow.python.keras.callbacks import Callback
from tensorflow.python.platform import tf_logging as logging
class EarlyStopping(Callback):
""" Original implementation from keras, copied here with some
improvement.
Stop training when a monitored quantity has stopped improving.
Arguments:
monitor: Quantity to be monitored.
min_delta: Minimum change in the monitored quantity
to qualify as an improvement, i.e. an absolute
change of less than min_delta, will count as no
improvement.
patience: Number of epochs with no improvement
after which training will be stopped.
verbose: verbosity mode.
mode: One of `{"auto", "min", "max"}`. In `min` mode,
training will stop when the quantity
monitored has stopped decreasing; in `max`
mode it will stop when the quantity
monitored has stopped increasing; in `auto`
mode, the direction is automatically inferred
from the name of the monitored quantity.
baseline: Baseline value for the monitored quantity.
Training will stop if the model doesn't show improvement over the
baseline.
restore_best_weights: Whether to restore model weights from
the epoch with the best value of the monitored quantity.
If False, the model weights obtained at the last step of
training are used.
Example:
```python
callback = tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=3)
# This callback will stop the training when there is no improvement in
# the validation loss for three consecutive epochs.
model.fit(data, labels, epochs=100, callbacks=[callback],
validation_data=(val_data, val_labels))
```
"""
def __init__(self,
monitor='val_loss',
min_delta=0,
patience=0,
verbose=0,
mode='auto',
baseline=None,
terminate_on_nan=True,
restore_best_weights=False):
super(EarlyStopping, self).__init__()
self.monitor = monitor
self.patience = patience
self.verbose = verbose
self.baseline = baseline
self.min_delta = abs(min_delta)
self.wait = 0
self.stopped_epoch = 0
self.terminate_on_nan = bool(terminate_on_nan)
self.restore_best_weights = restore_best_weights
self.best_weights = None
self.best_epoch = None
if mode not in ['auto', 'min', 'max']:
logging.warning(
'EarlyStopping mode %s is unknown, '
'fallback to auto mode.', mode)
mode = 'auto'
if mode == 'min':
self.monitor_op = np.less
elif mode == 'max':
self.monitor_op = np.greater
else:
if 'acc' in self.monitor:
self.monitor_op = np.greater
else:
self.monitor_op = np.less
# Give min_delta the sign of an improvement step, so that on_epoch_end can
# uniformly test ``monitor_op(current - min_delta, best)``.
if self.monitor_op == np.greater:
self.min_delta *= 1
else:
self.min_delta *= -1
def on_train_begin(self, logs=None):
# Allow instances to be re-used
self.wait = 0
self.stopped_epoch = 0
if self.baseline is not None:
self.best = self.baseline
else:
self.best = np.Inf if self.monitor_op == np.less else -np.Inf
def on_batch_end(self, batch, logs=None):
logs = logs or {}
loss = logs.get('loss')
if loss is not None:
if np.isnan(loss) or np.isinf(loss):
# terminate on NaN
if self.terminate_on_nan:
print('Batch %d: Invalid loss, terminating training' % (batch))
self.model.stop_training = True
# restoring the best weights
if self.restore_best_weights and self.best_weights is not None:
if self.verbose > 0:
print(
'Restoring model weights from the end of the best epoch #%d.' %
self.best_epoch)
self.model.set_weights(self.best_weights)
def on_epoch_end(self, epoch, logs=None):
current = self.get_monitor_value(logs)
if current is None:
return
if self.monitor_op(current - self.min_delta, self.best):
self.best = current
self.wait = 0
if self.restore_best_weights:
self.best_weights = self.model.get_weights()
self.best_epoch = epoch
else:
self.wait += 1
if self.wait >= self.patience:
self.stopped_epoch = epoch
self.model.stop_training = True
if self.restore_best_weights:
if self.verbose > 0:
print(
'Restoring model weights from the end of the best epoch #%d.' %
self.best_epoch)
self.model.set_weights(self.best_weights)
def on_train_end(self, logs=None):
if self.stopped_epoch > 0 and self.verbose > 0:
print('Epoch %05d: early stopping' % (self.stopped_epoch + 1))
def get_monitor_value(self, logs):
logs = logs or {}
monitor_value = logs.get(self.monitor)
if monitor_value is None:
logging.warning(
'Early stopping conditioned on metric `%s` '
'which is not available. Available metrics are: %s', self.monitor,
','.join(list(logs.keys())))
return monitor_value
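# Hedged standalone sketch (editor's addition): shows how the sign-flipped
# min_delta interacts with the improvement test in on_epoch_end for 'min' mode.
if __name__ == "__main__":
    monitor_op, min_delta, best = np.less, -0.1, 1.0  # 'min' mode after the sign flip
    print(monitor_op(0.95 - min_delta, best))  # False: improved, but by less than 0.1
    print(monitor_op(0.85 - min_delta, best))  # True: improved by more than 0.1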
| {
"repo_name": "imito/odin",
"path": "odin/backend/keras_callbacks.py",
"copies": "1",
"size": "5241",
"license": "mit",
"hash": -2785436992071219000,
"line_mean": 33.2549019608,
"line_max": 79,
"alpha_frac": 0.6216370922,
"autogenerated": false,
"ratio": 3.9614512471655328,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.006974295127229153,
"num_lines": 153
} |
from __future__ import absolute_import, division, print_function
import numpy as np
from toolz import concatv, drop, interleave
class SimplexPoint(object):
__slots__ = ('stencil', 'stepsize', 'halvings', 'index', 'is_reflect',
'is_doubled', 'simplex_key', 'point_key', 'point')
def __init__(self, point, parent, index, is_reflect=False, is_contract=False):
self.stencil = parent.stencil
self.stepsize = parent.stepsize
self.halvings = parent.halvings
self.index = index
self.is_reflect = is_reflect
self.is_doubled = is_reflect and parent.is_reflect and index == 1 and not parent.is_doubled
if is_contract:
self.stepsize = self.stencil.to_grid(0.5 * self.stepsize)
self.halvings += 1
elif self.is_doubled:
self.stepsize = self.stencil.to_grid(2 * self.stepsize)
self.halvings -= 1
self.simplex_key = self.stencil.get_simplex_key(parent, index, is_reflect)
self.point_key = self.stencil.get_point_key(point)
self.point = self.stencil.get_point(self.point_key)
@property
def simplex(self):
return self.stencil.get_simplex(self.simplex_key)
def get_points(self):
points = self.stencil.to_grid(self.point + self.stepsize * self.simplex)
return (SimplexPoint(x, self, i) for i, x in drop(1, enumerate(points)))
def get_reflections(self):
if self.index == 0 and self.is_reflect and not self.is_doubled:
return iter([])
points = self.stencil.to_grid(self.point - self.stepsize * self.simplex)
return (SimplexPoint(x, self, i, is_reflect=True) for i, x in enumerate(points))
def get_contractions(self):
if self.halvings > self.stencil.max_halvings:
return iter([])
points = self.stencil.to_grid(self.point + 0.5 * self.stepsize * self.simplex)
return (SimplexPoint(x, self, i, is_contract=True) for i, x in enumerate(points))
def __hash__(self):
return hash((self.point_key, self.simplex_key, self.index, self.halvings,
self.is_reflect, self.is_doubled))
def __eq__(self, other):
return (
self.point_key == other.point_key
and self.simplex_key == other.simplex_key
and self.index == other.index
and self.halvings == other.halvings
and self.is_reflect == other.is_reflect
and self.is_doubled == other.is_doubled
and self.stencil is other.stencil
)
def __repr__(self):
return type(self).__name__ + repr(self.point)[len('array'):]
class InitialSimplexPoint(object):
def __init__(self, stencil):
self.stencil = stencil
self.simplex = stencil.simplex
self.halvings = 0
self.stepsize = 1.0
self.is_reflect = False
self.is_doubled = False
class RightHandedSimplexStencil(object):
def __init__(self, dims, max_halvings):
self.dims = dims
self.simplex_intern = {}
self.point_intern = {}
self.point_cache = {}
self.max_halvings = max_halvings
self.gridsize = 2.**(-max_halvings-1)
r = np.arange(dims + 1)
self.indexers = np.stack([
np.concatenate([[i], r[:i], r[i+1:]])
for i in range(dims + 1)
])
self.simplex = np.concatenate(
[np.zeros((1, dims), dtype=np.int8), np.identity(dims, dtype=np.int8)],
axis=0
)
self.point = np.zeros(dims)
self.get_simplex_key(self, 0, False)
self.get_point_key(self.point)
self._stencil_points = [] # This serves as a cache for generated stencil points
self._stencil_iter = self._generate_stencil()
def get_simplex_key(self, parent, index, is_reflect):
simplex = parent.simplex
if index != 0:
simplex = (simplex - simplex[index])[self.indexers[index]]
if is_reflect:
simplex = -simplex
key = simplex.tostring()
if key in self.simplex_intern:
return self.simplex_intern[key]
self.simplex_intern[key] = key
return key
def get_simplex(self, key):
return np.fromstring(key, np.int8).reshape((self.dims + 1, self.dims))
def get_point_key(self, point):
key = point.tostring()
if key in self.point_cache:
return self.point_intern[key]
self.point_intern[key] = key
self.point_cache[key] = point
return key
def get_point(self, key):
return self.point_cache[key]
def to_grid(self, x):
return np.round(x / self.gridsize) * self.gridsize
def _generate_stencil(self):
init = InitialSimplexPoint(self)
point = SimplexPoint(self.point, init, 0)
seen = {point}
seen_add = seen.add
first_seen = {point.point_key}
first_seen_add = first_seen.add
stencil_points_append = self._stencil_points.append
for p in point.get_points():
stencil_points_append(p)
yield p
first_seen_add(p.point_key)
seen_add(p)
self_reflect = []
mirror_reflect = []
reflect = []
self_contract = [point]
contract = []
while True:
next_self_reflect = []
next_mirror_reflect = []
next_reflect = []
next_self_contract = []
next_contract = []
for p in concatv(
interleave(x.get_reflections() for x in self_reflect),
interleave(x.get_reflections() for x in mirror_reflect),
interleave(x.get_reflections() for x in reflect),
interleave(x.get_reflections() for x in self_contract),
interleave(x.get_reflections() for x in contract),
):
if p.point_key not in first_seen:
stencil_points_append(p)
yield p
first_seen_add(p.point_key)
seen_add(p)
next_reflect.append(p)
elif p not in seen:
seen_add(p)
if p.index == 0:
next_self_reflect.append(p)
elif p.index == 1:
next_mirror_reflect.append(p)
else:
next_reflect.append(p)
for p in concatv(
interleave(x.get_contractions() for x in self_reflect),
interleave(x.get_contractions() for x in mirror_reflect),
interleave(x.get_contractions() for x in reflect),
interleave(x.get_contractions() for x in self_contract),
interleave(x.get_contractions() for x in contract),
):
if p.point_key not in first_seen:
stencil_points_append(p)
yield p
first_seen_add(p.point_key)
seen_add(p)
next_contract.append(p)
elif p not in seen:
seen_add(p)
if p.index == 0:
next_self_contract.append(p)
else:
next_contract.append(p)
self_reflect = next_self_reflect
mirror_reflect = next_mirror_reflect
reflect = next_reflect
self_contract = next_self_contract
contract = next_contract
def generate_stencil_points(self):
return concatv(self._stencil_points, self._stencil_iter)
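# Hedged usage sketch (editor's addition): iterating the stencil yields
# SimplexPoint objects whose coordinates and step sizes evolve as the pattern
# reflects and contracts around the origin.
if __name__ == "__main__":
    from itertools import islice
    stencil = RightHandedSimplexStencil(dims=2, max_halvings=4)
    for p in islice(stencil.generate_stencil_points(), 6):
        print(p.point, p.stepsize)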
| {
"repo_name": "eriknw/dask-patternsearch",
"path": "dask_patternsearch/stencil.py",
"copies": "1",
"size": "7662",
"license": "bsd-3-clause",
"hash": 198489531210578140,
"line_mean": 36.5588235294,
"line_max": 99,
"alpha_frac": 0.5520751762,
"autogenerated": false,
"ratio": 3.6925301204819276,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47446052966819274,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import numpy as np
from toolz import merge, accumulate
from into import discover, convert, append, into
from datashape.dispatch import dispatch
from datashape import DataShape
from operator import add
import itertools
from .core import rec_concatenate, Array, getem, get, names, from_array
from ..core import flatten
from ..compatibility import long
@discover.register(Array)
def discover_dask_array(a, **kwargs):
block = a._get_block(*([0] * a.ndim))
return DataShape(*(a.shape + (discover(block).measure,)))
arrays = [np.ndarray]
try:
import h5py
arrays.append(h5py.Dataset)
@dispatch(h5py.Dataset, (int, long))
def resize(x, size):
s = list(x.shape)
s[0] = size
return resize(x, tuple(s))
@dispatch(h5py.Dataset, tuple)
def resize(x, shape):
return x.resize(shape)
except ImportError:
pass
try:
import bcolz
arrays.append(bcolz.carray)
@dispatch(bcolz.carray, (int, long))
def resize(x, size):
return x.resize(size)
except ImportError:
pass
@convert.register(Array, tuple(arrays), cost=0.01)
def array_to_dask(x, name=None, blockshape=None, **kwargs):
return from_array(x, blockshape=blockshape, name=name, **kwargs)
@convert.register(np.ndarray, Array, cost=0.5)
def dask_to_numpy(x, **kwargs):
return rec_concatenate(get(x.dask, x._keys(), **kwargs))
@convert.register(float, Array, cost=0.5)
def dask_to_float(x, **kwargs):
return x.compute()
@append.register(tuple(arrays), Array)
def store_Array_in_ooc_data(out, arr, inplace=False, **kwargs):
if not inplace:
# Resize output dataset to accept new data
assert out.shape[1:] == arr.shape[1:]
resize(out, out.shape[0] + arr.shape[0]) # elongate
return arr.store(out)
| {
"repo_name": "PeterDSteinberg/dask",
"path": "dask/array/into.py",
"copies": "1",
"size": "1846",
"license": "bsd-3-clause",
"hash": -916139720945648400,
"line_mean": 25.3714285714,
"line_max": 71,
"alpha_frac": 0.6803900325,
"autogenerated": false,
"ratio": 3.21602787456446,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9395017346840371,
"avg_score": 0.00028011204481792715,
"num_lines": 70
} |
from __future__ import absolute_import, division, print_function
import numpy as np
from toolz import merge, partial
from ..base import tokenize
from .. import threaded
def _partial_fit(model, x, y, kwargs=None):
kwargs = kwargs or dict()
model.partial_fit(x, y, **kwargs)
return model
def fit(model, x, y, get=threaded.get, **kwargs):
""" Fit scikit learn model against dask arrays
Model must support the ``partial_fit`` interface for online or batch
learning.
partial_fit will be called on each block of the dask arrays in sequence. Ideally
your rows are independent and identically distributed.
Parameters
----------
model: sklearn model
Any model supporting partial_fit interface
x: dask Array
Two dimensional array, likely tall and skinny
y: dask Array
One dimensional array with same chunks as x's rows
kwargs:
options to pass to partial_fit
Examples
--------
>>> import dask.array as da
>>> X = da.random.random((10, 3), chunks=(5, 3))
>>> y = da.random.randint(0, 2, 10, chunks=(5,))
>>> from sklearn.linear_model import SGDClassifier
>>> sgd = SGDClassifier()
>>> sgd = da.learn.fit(sgd, X, y, classes=[1, 0])
>>> sgd # doctest: +SKIP
SGDClassifier(alpha=0.0001, class_weight=None, epsilon=0.1, eta0=0.0,
fit_intercept=True, l1_ratio=0.15, learning_rate='optimal',
loss='hinge', n_iter=5, n_jobs=1, penalty='l2', power_t=0.5,
random_state=None, shuffle=False, verbose=0, warm_start=False)
This passes all of X and y through the classifier sequentially. We can use
the classifier as normal on in-memory data
>>> import numpy as np
>>> sgd.predict(np.random.random((4, 3))) # doctest: +SKIP
array([1, 0, 0, 1])
Or predict on a larger dataset
>>> z = da.random.random((400, 3), chunks=(100, 3))
>>> da.learn.predict(sgd, z) # doctest: +SKIP
dask.array<x_11, shape=(400,), chunks=((100, 100, 100, 100),), dtype=int64>
"""
assert x.ndim == 2
assert y.ndim == 1
assert x.chunks[0] == y.chunks[0]
assert hasattr(model, 'partial_fit')
if len(x.chunks[1]) > 1:
x = x.reblock(chunks=(x.chunks[0], sum(x.chunks[1])))
nblocks = len(x.chunks[0])
name = 'fit-' + tokenize(model, x, y, kwargs)
dsk = {(name, -1): model}
dsk.update(dict(((name, i), (_partial_fit, (name, i - 1),
(x.name, i, 0),
(y.name, i), kwargs))
for i in range(nblocks)))
return get(merge(x.dask, y.dask, dsk), (name, nblocks - 1))
def _predict(model, x):
return model.predict(x)[:, None]
def predict(model, x):
""" Predict with a scikit learn model
Parameters
----------
model : scikit learn classifier
x : dask Array
See docstring for ``da.learn.fit``
"""
assert x.ndim == 2
if len(x.chunks[1]) > 1:
x = x.reblock(chunks=(x.chunks[0], sum(x.chunks[1])))
func = partial(_predict, model)
xx = np.zeros((1, x.shape[1]), dtype=x.dtype)
dt = model.predict(xx).dtype
return x.map_blocks(func, chunks=(x.chunks[0], (1,)), dtype=dt).squeeze()
| {
"repo_name": "cpcloud/dask",
"path": "dask/array/learn.py",
"copies": "5",
"size": "3248",
"license": "bsd-3-clause",
"hash": -266483141824313950,
"line_mean": 30.2307692308,
"line_max": 79,
"alpha_frac": 0.5914408867,
"autogenerated": false,
"ratio": 3.3903966597077244,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 104
} |
from __future__ import absolute_import, division, print_function
import numpy as np
from .wrap import wrap, wrap_func_size_as_kwarg
"""
Univariate distributions
"""
wrap = wrap(wrap_func_size_as_kwarg)
random = wrap(np.random.random)
beta = wrap(np.random.beta)
binomial = wrap(np.random.binomial)
chisquare = wrap(np.random.chisquare)
exponential = wrap(np.random.exponential)
f = wrap(np.random.f)
gamma = wrap(np.random.gamma)
geometric = wrap(np.random.geometric)
gumbel = wrap(np.random.gumbel)
hypergeometric = wrap(np.random.hypergeometric)
laplace = wrap(np.random.laplace)
logistic = wrap(np.random.logistic)
lognormal = wrap(np.random.lognormal)
logseries = wrap(np.random.logseries)
negative_binomial = wrap(np.random.negative_binomial)
noncentral_chisquare = wrap(np.random.noncentral_chisquare)
noncentral_f = wrap(np.random.noncentral_f)
normal = wrap(np.random.normal)
pareto = wrap(np.random.pareto)
poisson = wrap(np.random.poisson)
power = wrap(np.random.power)
rayleigh = wrap(np.random.rayleigh)
triangular = wrap(np.random.triangular)
uniform = wrap(np.random.uniform)
vonmises = wrap(np.random.vonmises)
wald = wrap(np.random.wald)
weibull = wrap(np.random.weibull)
zipf = wrap(np.random.zipf)
"""
Standard distributions
"""
standard_cauchy = wrap(np.random.standard_cauchy)
standard_exponential = wrap(np.random.standard_exponential)
standard_gamma = wrap(np.random.standard_gamma)
standard_normal = wrap(np.random.standard_normal)
standard_t = wrap(np.random.standard_t)
"""
TODO: Multivariate distributions
dirichlet =
multinomial =
"""
| {
"repo_name": "esc/dask",
"path": "dask/array/random.py",
"copies": "5",
"size": "1570",
"license": "bsd-3-clause",
"hash": 1508854771252968200,
"line_mean": 27.0357142857,
"line_max": 64,
"alpha_frac": 0.7675159236,
"autogenerated": false,
"ratio": 2.9734848484848486,
"config_test": false,
"has_no_keywords": true,
"few_assignments": false,
"quality_score": 0.6241000772084848,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import numpy as np
import blz
from dynd import nd
import datashape
from . import DDesc, Capabilities
from .dynd_data_descriptor import DyND_DDesc
from shutil import rmtree
# WARNING! BLZ always return NumPy arrays when doing indexing
# operations. This is why DyND_DDesc is used for returning
# the values here.
def blz_descriptor_iter(blzarr):
for i in range(len(blzarr)):
# BLZ doesn't have a convenient way to avoid collapsing
# to a scalar, this is a way to avoid that
el = np.array(blzarr[i], dtype=blzarr.dtype)
yield DyND_DDesc(nd.array(el))
class BLZ_DDesc(DDesc):
"""
A Blaze data descriptor which exposes a BLZ array.
"""
def __init__(self, path=None, mode='r', **kwargs):
self.path = path
self.mode = mode
self.kwargs = kwargs
if isinstance(path, blz.barray):
self.blzarr = path
elif mode != 'w':
self.blzarr = blz.barray(rootdir=path, mode=mode, **kwargs)
else:
# This will be set in the constructor later on
self.blzarr = None
@property
def dshape(self):
# This cannot be cached because the BLZ can change the dshape
obj = self.blzarr
return datashape.from_numpy(obj.shape, obj.dtype)
@property
def capabilities(self):
"""The capabilities for the BLZ arrays."""
if self.blzarr is None:
persistent = False
else:
            persistent = self.blzarr.rootdir is not None
return Capabilities(
# BLZ arrays can be updated
immutable = False,
# BLZ arrays are concrete
deferred = False,
            # BLZ arrays can be either persistent or in-memory
persistent = persistent,
# BLZ arrays can be appended efficiently
appendable = True,
remote = False,
)
def __array__(self):
return np.array(self.blzarr)
def __len__(self):
# BLZ arrays are never scalars
return len(self.blzarr)
def __getitem__(self, key):
blzarr = self.blzarr
# The returned arrays are temporary buffers,
# so must be flagged as readonly.
return DyND_DDesc(nd.asarray(blzarr[key], access='readonly'))
def __setitem__(self, key, value):
# We decided that BLZ should be read and append only
raise NotImplementedError
def __iter__(self):
return blz_descriptor_iter(self.blzarr)
# This is not part of the DDesc interface itself, but can
    # be handy for other situations not requiring full compliance with
# it.
def append(self, values):
"""Append a list of values."""
shape, dtype = datashape.to_numpy(self.dshape)
values_arr = np.array(values, dtype=dtype)
shape_vals = values_arr.shape
if len(shape_vals) < len(shape):
shape_vals = (1,) + shape_vals
if len(shape_vals) != len(shape):
raise ValueError("shape of values is not compatible")
# Now, do the actual append
self.blzarr.append(values_arr.reshape(shape_vals))
self.blzarr.flush()
def iterchunks(self, blen=None, start=None, stop=None):
"""Return chunks of size `blen` (in leading dimension).
Parameters
----------
blen : int
The length, in rows, of the buffers that are returned.
start : int
Where the iterator starts. The default is to start at the
beginning.
stop : int
Where the iterator stops. The default is to stop at the end.
Returns
-------
out : iterable
            This iterable returns buffers as NumPy arrays of
homogeneous or structured types, depending on whether
`self.original` is a barray or a btable object.
See Also
--------
wherechunks
"""
# Return the iterable
return blz.iterblocks(self.blzarr, blen, start, stop)
def wherechunks(self, expression, blen=None, outfields=None, limit=None,
skip=0):
"""Return chunks fulfilling `expression`.
        Iterate over the rows that fulfill the `expression` condition
on Table `self.original` in blocks of size `blen`.
Parameters
----------
expression : string or barray
A boolean Numexpr expression or a boolean barray.
blen : int
The length of the block that is returned. The default is the
chunklen, or for a btable, the minimum of the different column
chunklens.
outfields : list of strings or string
The list of column names that you want to get back in results.
Alternatively, it can be specified as a string such as 'f0 f1' or
'f0, f1'. If None, all the columns are returned.
limit : int
A maximum number of elements to return. The default is return
everything.
skip : int
An initial number of elements to skip. The default is 0.
Returns
-------
out : iterable
This iterable returns buffers as NumPy arrays made of
structured types (or homogeneous ones in case `outfields` is a
            single field).
See Also
--------
iterchunks
"""
# Return the iterable
return blz.whereblocks(self.blzarr, expression, blen, outfields,
limit, skip)
def remove(self):
"""Remove the persistent storage."""
if self.capabilities.persistent:
rmtree(self.path)
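# Minimal usage sketch (an editorial addition, not part of the original
# module).  It assumes ``blz.barray`` accepts an array-like argument, as the
# carray library it descends from does.
def _demo_blz_ddesc():
    dd = BLZ_DDesc(blz.barray(np.arange(10)))
    dd.append([10, 11])
    return len(dd), dd.dshape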
| {
"repo_name": "talumbau/blaze",
"path": "blaze/datadescriptor/blz_data_descriptor.py",
"copies": "1",
"size": "5763",
"license": "bsd-3-clause",
"hash": 5885715087383946000,
"line_mean": 31.9314285714,
"line_max": 77,
"alpha_frac": 0.5918792296,
"autogenerated": false,
"ratio": 4.234386480529023,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.002095063403819164,
"num_lines": 175
} |
from __future__ import absolute_import, division, print_function
import numpy as np
import blz
from dynd import nd
import datashape
from . import IDataDescriptor, Capabilities
from .dynd_data_descriptor import DyNDDataDescriptor
# WARNING! BLZ always return NumPy arrays when doing indexing
# operations. This is why DyNDDataDescriptor is used for returning
# the values here.
def blz_descriptor_iter(blzarr):
for i in range(len(blzarr)):
# BLZ doesn't have a convenient way to avoid collapsing
# to a scalar, this is a way to avoid that
el = np.array(blzarr[i], dtype=blzarr.dtype)
yield DyNDDataDescriptor(nd.array(el))
class BLZDataDescriptor(IDataDescriptor):
"""
A Blaze data descriptor which exposes a BLZ array.
"""
def __init__(self, obj):
# This is a low level interface, so strictly
# require a BLZ barray here
if not isinstance(obj, blz.barray):
raise TypeError(('object is not a blz array, '
'it has type %r') % type(obj))
self.blzarr = obj
@property
def dshape(self):
# This cannot be cached because the BLZ can change the dshape
obj = self.blzarr
return datashape.from_numpy(obj.shape, obj.dtype)
@property
def capabilities(self):
"""The capabilities for the BLZ arrays."""
return Capabilities(
# BLZ arrays can be updated
immutable = False,
# BLZ arrays are concrete
deferred = False,
            # BLZ arrays can be either persistent or in-memory
persistent = self.blzarr.rootdir is not None,
# BLZ arrays can be appended efficiently
appendable = True,
remote = False,
)
def __array__(self):
return np.array(self.blzarr)
def __len__(self):
# BLZ arrays are never scalars
return len(self.blzarr)
def __getitem__(self, key):
blzarr = self.blzarr
# The returned arrays are temporary buffers,
# so must be flagged as readonly.
return DyNDDataDescriptor(nd.asarray(blzarr[key], access='readonly'))
def __setitem__(self, key, value):
# We decided that BLZ should be read and append only
raise NotImplementedError
def __iter__(self):
return blz_descriptor_iter(self.blzarr)
# This is not part of the DataDescriptor interface itself, but can
    # be handy for other situations not requiring full compliance with
# it.
def append(self, values):
"""Append a list of values."""
shape, dtype = datashape.to_numpy(self.dshape)
values_arr = np.array(values, dtype=dtype)
shape_vals = values_arr.shape
if len(shape_vals) < len(shape):
shape_vals = (1,) + shape_vals
if len(shape_vals) != len(shape):
raise ValueError("shape of values is not compatible")
# Now, do the actual append
self.blzarr.append(values_arr.reshape(shape_vals))
self.blzarr.flush()
def iterchunks(self, blen=None, start=None, stop=None):
"""Return chunks of size `blen` (in leading dimension).
Parameters
----------
blen : int
The length, in rows, of the buffers that are returned.
start : int
Where the iterator starts. The default is to start at the
beginning.
stop : int
Where the iterator stops. The default is to stop at the end.
Returns
-------
out : iterable
            This iterable returns buffers as NumPy arrays of
homogeneous or structured types, depending on whether
`self.original` is a barray or a btable object.
See Also
--------
wherechunks
"""
# Return the iterable
return blz.iterblocks(self.blzarr, blen, start, stop)
def wherechunks(self, expression, blen=None, outfields=None, limit=None,
skip=0):
"""Return chunks fulfilling `expression`.
        Iterate over the rows that fulfill the `expression` condition
on Table `self.original` in blocks of size `blen`.
Parameters
----------
expression : string or barray
A boolean Numexpr expression or a boolean barray.
blen : int
The length of the block that is returned. The default is the
chunklen, or for a btable, the minimum of the different column
chunklens.
outfields : list of strings or string
The list of column names that you want to get back in results.
Alternatively, it can be specified as a string such as 'f0 f1' or
'f0, f1'. If None, all the columns are returned.
limit : int
A maximum number of elements to return. The default is return
everything.
skip : int
An initial number of elements to skip. The default is 0.
Returns
-------
out : iterable
This iterable returns buffers as NumPy arrays made of
structured types (or homogeneous ones in case `outfields` is a
            single field).
See Also
--------
iterchunks
"""
# Return the iterable
return blz.whereblocks(self.blzarr, expression, blen, outfields,
limit, skip)
| {
"repo_name": "aaronmartin0303/blaze",
"path": "blaze/datadescriptor/blz_data_descriptor.py",
"copies": "3",
"size": "5451",
"license": "bsd-3-clause",
"hash": 3651742674395680000,
"line_mean": 33.06875,
"line_max": 77,
"alpha_frac": 0.5997064759,
"autogenerated": false,
"ratio": 4.295508274231678,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6395214750131678,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import numpy as np
import dask.array as da
try:
from dask.array import isin
except ImportError: # pragma: no cover
# Copied from dask v0.17.3.
# Used under the terms of Dask's license, see licenses/DASK_LICENSE.
def _isin_kernel(element, test_elements, assume_unique=False):
values = np.in1d(element.ravel(), test_elements,
assume_unique=assume_unique)
return values.reshape(element.shape + (1,) * test_elements.ndim)
def isin(element, test_elements, assume_unique=False, invert=False):
element = da.asarray(element)
test_elements = da.asarray(test_elements)
element_axes = tuple(range(element.ndim))
test_axes = tuple(i + element.ndim for i in range(test_elements.ndim))
mapped = da.atop(_isin_kernel, element_axes + test_axes,
element, element_axes,
test_elements, test_axes,
adjust_chunks={axis: lambda _: 1
for axis in test_axes},
dtype=bool,
assume_unique=assume_unique)
result = mapped.any(axis=test_axes)
if invert:
result = ~result
return result
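# Usage sketch (an editorial addition): whichever branch supplied ``isin``
# above, it mimics ``np.isin`` while staying lazy on dask arrays.
def _demo_isin():
    data = da.arange(10, chunks=5)
    return isin(data, [2, 3, 5, 7]).compute()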
| {
"repo_name": "jcmgray/xarray",
"path": "xarray/core/dask_array_compat.py",
"copies": "1",
"size": "1326",
"license": "apache-2.0",
"hash": 8776050326791888000,
"line_mean": 40.4375,
"line_max": 78,
"alpha_frac": 0.5844645551,
"autogenerated": false,
"ratio": 4.105263157894737,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5189727712994737,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import numpy as np
import datashape as ds
import pytest
tb = pytest.importorskip('tables')
from odo import into
from odo.utils import tmpfile
from odo.backends.pytables import PyTables, discover
import os
try:
f = tb.open_file('import-tables-test.hdf5', mode='w')
f.close()
if os.path.exists('import-tables-test.hdf5'):
os.remove('import-tables-test.hdf5')
except tb.exceptions.HDF5ExtError as e:
pytest.skip('Cannot write file, error:\n%s' % e)
x = np.array([(1, 'Alice', 100),
(2, 'Bob', -200),
(3, 'Charlie', 300),
(4, 'Denis', 400),
(5, 'Edith', -500)],
dtype=[('id', '<i8'), ('name', 'S7'), ('amount', '<i8')])
@pytest.yield_fixture
def tbfile():
with tmpfile('.h5') as filename:
f = tb.open_file(filename, mode='w')
d = f.create_table('/', 'title', x)
d.close()
f.close()
yield filename
now = np.datetime64('now').astype('datetime64[us]')
raw_dt_data = [(1, 'Alice', 100, now),
(2, 'Bob', -200, now),
(3, 'Charlie', 300, now),
(4, 'Denis', 400, now),
(5, 'Edith', -500, now)]
dt_data = np.array(raw_dt_data, dtype=np.dtype([('id', 'i8'),
('name', 'S7'),
('amount', 'f8'),
('date', 'M8[ms]')]))
@pytest.yield_fixture
def dt_tb():
class Desc(tb.IsDescription):
id = tb.Int64Col(pos=0)
name = tb.StringCol(itemsize=7, pos=1)
amount = tb.Float64Col(pos=2)
date = tb.Time64Col(pos=3)
non_date_types = list(zip(['id', 'name', 'amount'], ['i8', 'S7', 'f8']))
# has to be in microseconds as per pytables spec
dtype = np.dtype(non_date_types + [('date', 'M8[us]')])
rec = dt_data.astype(dtype)
# also has to be a floating point number
dtype = np.dtype(non_date_types + [('date', 'f8')])
rec = rec.astype(dtype)
rec['date'] /= 1e6
with tmpfile('.h5') as filename:
f = tb.open_file(filename, mode='w')
d = f.create_table('/', 'dt', description=Desc)
d.append(rec)
d.close()
f.close()
yield filename
class TestPyTablesLight(object):
def test_read(self, tbfile):
t = PyTables(path=tbfile, datapath='/title')
shape = t.shape
t._v_file.close()
assert shape == (5,)
def test_write_no_dshape(self, tbfile):
with pytest.raises(ValueError):
PyTables(path=tbfile, datapath='/write_this')
def test_write_with_dshape(self, tbfile):
f = tb.open_file(tbfile, mode='a')
try:
assert '/write_this' not in f
finally:
f.close()
del f
# create our table
dshape = '{id: int, name: string[7, "ascii"], amount: float32}'
t = PyTables(path=tbfile, datapath='/write_this', dshape=dshape)
shape = t.shape
filename = t._v_file.filename
t._v_file.close()
assert filename == tbfile
assert shape == (0,)
@pytest.mark.xfail(reason="Poor datetime support")
def test_table_into_ndarray(self, dt_tb):
t = PyTables(dt_tb, '/dt')
res = into(np.ndarray, t)
try:
for k in res.dtype.fields:
lhs, rhs = res[k], dt_data[k]
if (issubclass(np.datetime64, lhs.dtype.type) and
issubclass(np.datetime64, rhs.dtype.type)):
lhs, rhs = lhs.astype('M8[us]'), rhs.astype('M8[us]')
assert np.array_equal(lhs, rhs)
finally:
t._v_file.close()
def test_ndarray_into_table(self, dt_tb):
dtype = ds.from_numpy(dt_data.shape, dt_data.dtype)
t = PyTables(dt_tb, '/out', dtype)
try:
res = into(
np.ndarray, into(t, dt_data, filename=dt_tb, datapath='/out'))
for k in res.dtype.fields:
lhs, rhs = res[k], dt_data[k]
if (issubclass(np.datetime64, lhs.dtype.type) and
issubclass(np.datetime64, rhs.dtype.type)):
lhs, rhs = lhs.astype('M8[us]'), rhs.astype('M8[us]')
assert np.array_equal(lhs, rhs)
finally:
t._v_file.close()
@pytest.mark.xfail(reason="Poor datetime support")
def test_datetime_discovery(self, dt_tb):
t = PyTables(dt_tb, '/dt')
lhs, rhs = map(discover, (t, dt_data))
t._v_file.close()
assert lhs == rhs
def test_node_discover(self, dt_tb):
root = PyTables(dt_tb, '/')
result = discover(root)
expected = ds.dshape("""{dt: 5 * {id: int64,
name: string[7, "A"],
amount: float64,
date: float64}}""")
assert result == expected.measure
root._v_file.close()
def test_no_extra_files_around(self, dt_tb):
""" check the context manager auto-closes the resources """
assert not len(tb.file._open_files)
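# Usage sketch (an editorial addition, not part of the original test module):
# the pattern exercised above, opening an existing table through the odo
# PyTables resource and converting it to a NumPy array with ``into``.
def _demo_pytables_roundtrip(h5path):
    t = PyTables(path=h5path, datapath='/title')
    try:
        return into(np.ndarray, t)
    finally:
        t._v_file.close()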
| {
"repo_name": "ywang007/odo",
"path": "odo/backends/tests/test_pytables.py",
"copies": "1",
"size": "5252",
"license": "bsd-3-clause",
"hash": -5954882680826653000,
"line_mean": 31.825,
"line_max": 78,
"alpha_frac": 0.5158035034,
"autogenerated": false,
"ratio": 3.4758438120450035,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4491647315445004,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import numpy as np
import datashape as ds
import pytest
tb = pytest.importorskip('tables')
from odo import into, odo
from odo.utils import tmpfile
from odo.backends.pytables import PyTables, discover
import os
try:
f = tb.open_file('import-tables-test.hdf5', mode='w')
f.close()
if os.path.exists('import-tables-test.hdf5'):
os.remove('import-tables-test.hdf5')
except tb.exceptions.HDF5ExtError as e:
pytest.skip('Cannot write file, error:\n%s' % e)
x = np.array([(1, 'Alice', 100),
(2, 'Bob', -200),
(3, 'Charlie', 300),
(4, 'Denis', 400),
(5, 'Edith', -500)],
dtype=[('id', '<i8'), ('name', 'S7'), ('amount', '<i8')])
@pytest.yield_fixture
def tbfile():
with tmpfile('.h5') as filename:
f = tb.open_file(filename, mode='w')
d = f.create_table('/', 'title', x)
d.close()
f.close()
yield filename
now = np.datetime64('now').astype('datetime64[us]')
raw_dt_data = [(1, 'Alice', 100, now),
(2, 'Bob', -200, now),
(3, 'Charlie', 300, now),
(4, 'Denis', 400, now),
(5, 'Edith', -500, now)]
dt_data = np.array(raw_dt_data, dtype=np.dtype([('id', 'i8'),
('name', 'S7'),
('amount', 'f8'),
('date', 'M8[ms]')]))
@pytest.yield_fixture
def dt_tb():
class Desc(tb.IsDescription):
id = tb.Int64Col(pos=0)
name = tb.StringCol(itemsize=7, pos=1)
amount = tb.Float64Col(pos=2)
date = tb.Time64Col(pos=3)
non_date_types = list(zip(['id', 'name', 'amount'], ['i8', 'S7', 'f8']))
# has to be in microseconds as per pytables spec
dtype = np.dtype(non_date_types + [('date', 'M8[us]')])
rec = dt_data.astype(dtype)
# also has to be a floating point number
dtype = np.dtype(non_date_types + [('date', 'f8')])
rec = rec.astype(dtype)
rec['date'] /= 1e6
with tmpfile('.h5') as filename:
f = tb.open_file(filename, mode='w')
d = f.create_table('/', 'dt', description=Desc)
d.append(rec)
d.close()
f.close()
yield filename
class TestPyTablesLight(object):
def test_read(self, tbfile):
t = PyTables(path=tbfile, datapath='/title')
shape = t.shape
t._v_file.close()
assert shape == (5,)
def test_write_no_dshape(self, tbfile):
with pytest.raises(ValueError):
PyTables(path=tbfile, datapath='/write_this')
def test_write_with_dshape(self, tbfile):
f = tb.open_file(tbfile, mode='a')
try:
assert '/write_this' not in f
finally:
f.close()
del f
# create our table
dshape = '{id: int, name: string[7, "ascii"], amount: float32}'
t = PyTables(path=tbfile, datapath='/write_this', dshape=dshape)
shape = t.shape
filename = t._v_file.filename
t._v_file.close()
assert filename == tbfile
assert shape == (0,)
@pytest.mark.xfail(reason="Poor datetime support")
def test_table_into_ndarray(self, dt_tb):
t = PyTables(dt_tb, '/dt')
res = into(np.ndarray, t)
try:
for k in res.dtype.fields:
lhs, rhs = res[k], dt_data[k]
if (issubclass(np.datetime64, lhs.dtype.type) and
issubclass(np.datetime64, rhs.dtype.type)):
lhs, rhs = lhs.astype('M8[us]'), rhs.astype('M8[us]')
assert np.array_equal(lhs, rhs)
finally:
t._v_file.close()
def test_ndarray_into_table(self, dt_tb):
dtype = ds.from_numpy(dt_data.shape, dt_data.dtype)
t = PyTables(dt_tb, '/out', dtype)
try:
res = into(
np.ndarray, into(t, dt_data, filename=dt_tb, datapath='/out'))
for k in res.dtype.fields:
lhs, rhs = res[k], dt_data[k]
if (issubclass(np.datetime64, lhs.dtype.type) and
issubclass(np.datetime64, rhs.dtype.type)):
lhs, rhs = lhs.astype('M8[us]'), rhs.astype('M8[us]')
assert np.array_equal(lhs, rhs)
finally:
t._v_file.close()
@pytest.mark.xfail(reason="Poor datetime support")
def test_datetime_discovery(self, dt_tb):
t = PyTables(dt_tb, '/dt')
lhs, rhs = map(discover, (t, dt_data))
t._v_file.close()
assert lhs == rhs
def test_node_discover(self, dt_tb):
root = PyTables(dt_tb, '/')
result = discover(root)
expected = ds.dshape("""{dt: 5 * {id: int64,
name: string[7, "A"],
amount: float64,
date: float64}}""")
assert result == expected.measure
root._v_file.close()
def test_no_extra_files_around(self, dt_tb):
""" check the context manager auto-closes the resources """
assert not len(tb.file._open_files)
def test_pytables_to_csv():
ndim = 2
with tmpfile('.h5') as fn:
h5file = tb.openFile(fn, mode='w', title="Test Array")
h5file.createArray('/', "test", np.zeros((ndim, ndim), dtype=float))
h5file.close()
with tmpfile('csv') as csv:
t = odo('pytables://%s::/test' % fn, csv)
assert odo(t, list) == [(0.0, 0.0), (0.0, 0.0)]
| {
"repo_name": "cpcloud/odo",
"path": "odo/backends/tests/test_pytables.py",
"copies": "4",
"size": "5644",
"license": "bsd-3-clause",
"hash": -6090682938617079000,
"line_mean": 32.0058479532,
"line_max": 78,
"alpha_frac": 0.5157689582,
"autogenerated": false,
"ratio": 3.428918590522479,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5944687548722479,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import numpy as np
import gsd.hoomd
import sklearn
import scipy.optimize as opt
import os
import os.path
import pdb
from sklearn.neighbors import BallTree
from sklearn.neighbors import radius_neighbors_graph
from scipy.spatial.distance import cdist,pdist
from scipy.special import erf
from scipy.sparse.csgraph import connected_components
from scipy.sparse import csr_matrix,lil_matrix,coo_matrix
#from .due import due, Doi
from .smoluchowski import massAvSize
from mpi4py import MPI
from cdistances import conOptDistanceCython,alignDistancesCython,subsquashRNG
from cdistances import squashRNGCOOCython,gyrTensxyCy
# scipy.weave is required by conOptDistanceC and alignedDistanceC below; it
# is only available with older SciPy / Python 2 installations, so guard the
# import.
try:
    from scipy import weave
except ImportError:  # pragma: no cover
    weave = None
__all__ = ["ClusterSnapshot", "ContactClusterSnapshot",
"OpticalClusterSnapshot","AlignedClusterSnapshot",
"ContactClusterSnapshotXTC","OpticalClusterSnapshotXTC",
"SnapSystem",
"conOptDistance","conOptDistanceC","alignedDistance",
"alignedDistanceC","fixMisplacedArom","checkSymmetry",
"squashRNG","squashRNGCython","squashRNGPy","squashRNGCOO",
"squashRNGCOOCython","getIndsCsr"]
# Use duecredit (duecredit.org) to provide a citation to relevant work to
# be cited. This does nothing, unless the user has duecredit installed,
# And calls this with duecredit (as in `python -m duecredit script.py`):
'''
due.cite(Doi("10.1167/13.9.30"),
description="Simple data analysis for clustering application",
tags=["data-analysis","clustering"],
path='clustering')
'''
def checkSymmetry(csr):
"""
Checks whether a matrix in CSR sparse format is symmetric.
Parameters
----------
csr: matrix in CSR format
Returns
-------
symyes: bool
True if symmetric, False if not
"""
symyes = not (csr!=csr.transpose()).max()
return symyes
def getIndsCsr(csr):
"""
Gets the indices of all entries in a csr matrix
----------
Parameters
----------
csr: matrix in CSR format
-------
Returns
-------
bonds: 2 x M matrix
list of (i,j) indices corresponding to each of the M entries in the CSR
matrix
Raises
------
TypeError: if csr is not in csr format
"""
if type(csr) != csr_matrix:
raise TypeError("Argument of getIndsCsr should be a CSR matrix!")
ia = csr.indptr
ja = csr.indices
cii = 0
bi = 0
m = len(ja)
bonds = np.zeros((m,2))
for i in range(1,len(ia)):
ninrow = ia[i]-ia[i-1]
row = i-1
for n in range(cii,cii+ninrow):
col = ja[n]
bonds[bi,0] = row
bonds[bi,1] = col
bi += 1
cii += ninrow
return bonds
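# Worked example (an editorial addition, not part of the original module):
# for a small symmetric CSR matrix, getIndsCsr simply walks ``indptr`` and
# ``indices`` and lists every stored (row, col) pair.
def _demo_getIndsCsr():
    demo = csr_matrix(np.array([[0., 1., 0.],
                                [1., 0., 1.],
                                [0., 1., 0.]]))
    # expected rows of the result: (0, 1), (1, 0), (1, 2), (2, 1)
    return getIndsCsr(demo)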
def squashRNG(rng,apermol):
"""
Reduces radius neighbors graph to a new graph based on molecules instead of
atoms.
Parameters
----------
rng: a graph in CSR format as produced by a BallTree
apermol: int
the number of atoms in a molecule
Returns
-------
molrng: a new graph in CSR format
Raises
------
RuntimeError: if the original rng is not symmetric
"""
if not checkSymmetry(rng):
raise RuntimeError("Graph is non-symmetrical")
sh = rng.shape
rng = rng.toarray()
newsh = (int(sh[0]/apermol),int(sh[1]/apermol))
#pdb.set_trace()
#molrng = lil_matrix(newsh)
molrng = np.zeros(newsh)
for i in range(0,newsh[0]):
for j in range(i+1,newsh[1]):
subrng = rng[apermol*i:apermol*(i+1),apermol*j:apermol*(j+1)]
if subrng.max():
molrng[i,j] = 1.0
molrng[j,i] = 1.0
return csr_matrix(molrng)
def squashRNGCOO(rng,apermol):
"""
Reduces radius neighbors graph to a new graph based on molecules instead of
atoms.
Uses COO format
Parameters
----------
rng: a graph in CSR format as produced by a BallTree
apermol: int
the number of atoms in a molecule
Returns
-------
molrng: a new graph in CSR format
Raises
------
RuntimeError: if the original rng is not symmetric
"""
if not checkSymmetry(rng):
raise RuntimeError("Graph is non-symmetrical")
sh = rng.shape
newsh = (int(sh[0]/apermol),int(sh[1]/apermol))
molrng = lil_matrix(newsh)
rng = coo_matrix(rng)
rows = rng.row//apermol
cols = rng.col//apermol
rowcols = rows * molrng.shape[1] + cols
urowcols = np.unique(rowcols)
rows = urowcols // molrng.shape[1]
cols = urowcols % molrng.shape[1]
#pdb.set_trace()
for i in range(len(rows)):
row = rows[i]
col = cols[i]
if col > row:
molrng[row,col] = 1
#pdb.set_trace()
return csr_matrix(molrng)
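# Illustrative sketch (an editorial addition): with two atoms per molecule, a
# single atom-atom contact between molecules 0 and 1 collapses to one
# molecule-level edge.  Note that squashRNGCOO only stores the upper
# triangle; connected_components is later called with directed=False, so
# that is sufficient.
def _demo_squashRNGCOO():
    atom_rng = csr_matrix(np.array([[0., 0., 1., 0.],
                                    [0., 0., 0., 0.],
                                    [1., 0., 0., 0.],
                                    [0., 0., 0., 0.]]))
    molrng = squashRNGCOO(atom_rng, 2)
    return molrng.toarray()  # expected: [[0., 1.], [0., 0.]]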
def squashRNGCython(rng,apermol):
"""
Reduces radius neighbors graph to a new graph based on molecules instead of
atoms, but uses Cython code to improve speed.
Parameters
----------
rng: a graph in CSR format as produced by a BallTree
apermol: int
the number of atoms in a molecule
Returns
-------
molrng: a new graph in CSR format
Raises
------
RuntimeError: if the original rng is not symmetric
"""
if not checkSymmetry(rng):
raise RuntimeError("Graph is non-symmetrical")
sh = rng.shape
rng = rng.toarray()
newsh = (int(sh[0]/apermol),int(sh[1]/apermol))
#pdb.set_trace()
#molrng = lil_matrix(newsh)
molrng = np.zeros(newsh)
molrng = subsquashRNG(rng,molrng,apermol)
return csr_matrix(molrng)
def squashRNGPy(rng,apermol):
"""
Reduces radius neighbors graph to a new graph based on molecules instead of
atoms. Dummy python debug test of Cython algorithm.
Parameters
----------
rng: a graph in CSR format as produced by a BallTree
apermol: int
the number of atoms in a molecule
Returns
-------
molrng: a new graph in CSR format
Raises
------
RuntimeError: if the original rng is not symmetric
"""
if not checkSymmetry(rng):
raise RuntimeError("Graph is non-symmetrical")
sh = rng.shape
rng = rng.toarray()
newsh = (int(sh[0]/apermol),int(sh[1]/apermol))
#pdb.set_trace()
#molrng = lil_matrix(newsh)
molrng = np.zeros(newsh)
molrng = subsquashRNGPy(rng,molrng,apermol)
#pdb.set_trace()
return csr_matrix(molrng)
def subsquashRNGPy(rng,molrng,apermol):
"""
    Python version of the C algorithm that sets a molecule-level block to 0
    when all of its entries are 0 and to 1 if at least one entry is 1
Parameters
----------
rng: a numpy array as produced by a BallTree
apermol: int
the number of atoms in a molecule
Returns
-------
molrng: a new graph
"""
dim = np.shape(molrng)[0]
sz = np.shape(rng)
rng = rng.reshape((1,sz[0]*sz[1]))[0]
molrng = molrng.reshape((1,dim*dim))[0]
for i in range(dim):
for j in range(i+1,dim):
istart = apermol*i;
iend = apermol*(i+1);
jstart = apermol*j;
jend = apermol*(j+1);
curr = 0;
#pdb.set_trace()
for k in range(istart,iend):
for m in range(jstart,jend):
if (rng[k*dim*apermol+m] != 0.):
curr = 1;
#pdb.set_trace()
if (curr == 1):
molrng[dim*i+j] = 1.0;
molrng[dim*j+i] = 1.0;
molrng = molrng.reshape((dim,dim))
return molrng
def fixMisplacedArom(gsdfile,gsdout,idMiss,idPartner,idNotMiss,idNotPartner
,molno,ats,ts):
"""
opens a gsd file, gets the trajectory, then writes out in place with
the incorrectly placed aromatic placed correctly
Parameters
----------
gsdfile: string
filename of the file to be rewritten
gsdout: string
where to write new stuff
idMiss: the id of the misplaced aromatic within the molecule
idPartner: the id of the partner to the misplaced aromatic within the mol
idNotMiss: the complementary correctly placed aromatic
idNotPartner: idNotMiss's partner
ts: which timesteps of the trajectory to rewrite
Notes
-----
pos(idMiss) = pos(idPartner) + (pos(idNotMiss) - pos(idNotPartner))
"""
traj = gsd.hoomd.open(gsdfile)
trajnew = gsd.hoomd.open(gsdout,'wb')
offset = molno
idMisses = offset+idMiss + np.arange(0,molno*(ats-1),ats-1)
idPartners = offset + idPartner + np.arange(0,molno*(ats-1),ats-1)
idNotMisses = offset + idNotMiss + np.arange(0,molno*(ats-1),ats-1)
idNotPartners = offset + idNotPartner + np.arange(0,molno*(ats-1),ats-1)
for t in ts:
snapshot = traj[t]
box = snapshot.configuration.box[0:3]
pos = snapshot.particles.position
pds = pos[idNotMisses] - pos[idNotPartners]
pds = pds - np.around(pds / box) * box
pos[idMisses] = pos[idPartners] + pds
snapnew = snapshot
snapnew.particles.position = pos
trajnew.append(snapnew)
def conOptDistance(x,y):
"""
Function that computes the distance between molecules for contact
or optical clusters
Parameters:
-----------
x : array
The 1D array of size 3*ats representing the first molecule
y : array
The 1D array of size 3*ats representing the second molecule
Returns
-------
r : float
The distance between x and y computed as the minimum distance
between any two beads in the molecules
"""
if len(x) % 3 != 0 or len(y) % 3 != 0:
raise RuntimeError("3D array has a number of entries not divisible \
by 3.")
    ats = int(len(x)/3)
xa = np.reshape(x,[ats,3])
ya = np.reshape(y,[ats,3])
#return np.min(euclidean_distances(xa,ya,squared=True))
return np.min(cdist(xa,ya,metric='sqeuclidean'))
def conOptDistanceC(x,y):
"""
Function that computes the distance between molecules for contact
or optical clusters
Parameters:
-----------
x : array
The 1D array of size 3*ats representing the first molecule
y : array
The 1D array of size 3*ats representing the second molecule
Returns
-------
r : float
The distance between x and y computed as the minimum distance
between any two beads in the molecules
Notes
-----
Uses scipy.weave to incorporate a little bit of C code to see if that
will speed things up
"""
if len(x) % 3 != 0 or len(y) % 3 != 0:
raise RuntimeError("3D array has a number of entries not divisible \
by 3.")
#xa = np.reshape(x,[ats,3])
#ya = np.reshape(y,[ats,3])
mind = 10000.0
support = '#include <math.h>'
code = """
int i,j;
return_val = 0;
double d;
for (i = 0; i < Nx[0]/3; i++) {
for (j = 0; j < Nx[0]/3; j++) {
d = (x[3*i] - y[3*j]) * (x[3*i] - y[3*j])
+ (x[3*i + 1] - y[3*j + 1]) * (x[3*i + 1] - y[3*j + 1])
+ (x[3*i + 2] - y[3*j + 2]) * (x[3*i + 2] - y[3*j + 2]);
if (d < mind){
mind = d;
}
}
}
return_val = mind;
"""
mind = weave.inline(code,['x', 'y', 'mind'],
support_code = support, libraries = ['m'])
#return np.min(euclidean_distances(xa,ya,squared=True))
return mind
def alignedDistance(x,y):
"""
Function that computes the distances between molecules for aligned clusters
Parameters:
-----------
x : array
The 1D array of size 3*ats representing the first molecule
y : array
The 1D array of size 3*ats representing the second molecule
Returns
-------
r : float
The distance between x and y computed as the minimum distance
between any two beads in the molecules
Raises
------
RuntimeError
if the array does not have a number of entries divisible by three
because it's supposed to be a flattened array of positions
Notes
-----
Compute the minimum distance of each COM to another COM
Take the three minimum distances of this list
Return the maximum of these three
"""
if len(x) % 3 != 0 or len(y) % 3 != 0:
raise RuntimeError("3D array has a number of entries not divisible \
by 3.")
ats = int(len(x)/3)
xa = np.reshape(x,[ats,3])
ya = np.reshape(y,[ats,3])
distmat = cdist(xa,ya,metric='sqeuclidean')
dists = np.zeros([ats * ats, 3])
dind = 0
for i in range(ats):
for j in range(ats):
dists[dind,0] = distmat[i,j]
dists[dind,1] = i
dists[dind,2] = j
dind += 1
sdists = dists[dists[:,0].argsort()]
i1 = sdists[0,1]
j1 = sdists[0,2]
i2 = i1
j2 = j1
ind2 = 1
while (i2 == i1) or (j2 == j1):
i2 = sdists[ind2,1]
j2 = sdists[ind2,2]
ind2 += 1
ind3 = ind2
i3 = sdists[ind3,1]
j3 = sdists[ind3,2]
while (i3 == i1) or (i3 == i2) or (j3 == j1) or (j3 == j2):
i3 = sdists[ind3,1]
j3 = sdists[ind3,2]
ind3 += 1
return sdists[ind3-1,0]
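# Usage sketch (an editorial addition): squared aligned-cluster distance
# between two three-bead molecules lying on parallel lines one unit apart;
# every matched bead pair is one unit away, so the result is 1.0.
def _demo_alignedDistance():
    x = np.array([0., 0., 0., 1., 0., 0., 2., 0., 0.])
    y = np.array([0., 1., 0., 1., 1., 0., 2., 1., 0.])
    return alignedDistance(x, y)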
def alignedDistanceC(x,y):
"""
Function that computes the distances between molecules for aligned clusters
Parameters:
-----------
x : array
The 1D array of size 3*ats representing the first molecule
y : array
The 1D array of size 3*ats representing the second molecule
Returns
-------
r : float
The distance between x and y computed as the minimum distance
between any two beads in the molecules
Raises
------
RuntimeError
if the array does not have a number of entries divisible by three
because it's supposed to be a flattened array of positions
Notes
-----
Compute the minimum distance of each COM to another COM
Take the three minimum distances of this list
Return the maximum of these three
Use scipy.weave and C++ to speed things up
"""
if len(x) % 3 != 0 or len(y) % 3 != 0:
raise RuntimeError("3D array has a number of entries not divisible \
by 3.")
ats = int(len(x)/3)
dists = np.zeros([ats * ats])
distsA = np.zeros([ats * ats])
distsB = np.zeros([ats * ats])
support = '#include <math.h>'
code = """
int i,j,dind = 0;
return_val = 0;
for (i = 0; i < ats; i++){
for (j = 0; j < ats; j++){
dists[dind] = (x[3 * i] - y[3 * j]) * (x[3 * i] - y[3 * j])
+ (x[3 * i + 1] - y[3 * j + 1]) * (x[3 * i + 1] - y[3 * j + 1])
+ (x[3 * i + 2] - y[3 * j + 2]) * (x[3 * i + 2] - y[3 * j + 2]);
distsA[dind] = i;
distsB[dind] = j;
dind++;
}
}
double mind = 10000.0;
int mindi, mindj;
for (int k = 0; k < ats * ats; k++){
if (dists[k] < mind){
mind = dists[k];
mindi = distsA[k];
mindj = distsB[k];
}
}
double mind2 = 10000.0;
int mind2i, mind2j;
for (int k = 0; k < ats * ats; k++){
if ((dists[k] < mind2) && (distsA[k] != mindi) && (distsB[k] != mindj))
{
mind2 = dists[k];
mind2i = distsA[k];
mind2j = distsB[k];
}
}
double mind3 = 10000.0;
for (int k = 0; k < ats * ats; k++){
if ((dists[k] < mind3) && (distsA[k] != mindi) && (distsB[k] != mindj)
&& (distsA[k] != mind2i) && (distsB[k] != mind2j)){
mind3 = dists[k];
}
}
return_val = mind3;
"""
mind3 = weave.inline(code,['x', 'y','dists','distsA','distsB','ats'],
support_code = support, libraries = ['m'])
return mind3
def fixCoords(pos,posinit,box):
"""
fixes all coords based on the initial coordinate and
the periodic boundary conditions
Parameters
----------
pos: 1 x 3*ats numpy array
positions of all the beads in the molecule
posinit: 1 x 3 numpy array
initial position on which the fixing is based
box: 1 x 3 numpy array
box dimensions
"""
for i in range(int(len(pos)/3)):
#pdb.set_trace()
dr = pos[3*i:3*i+3] - posinit
dr = dr - box*np.round(dr/box)
pos[3*i:3*i+3] = dr + posinit
return pos
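# Usage sketch (an editorial addition): unwrap a two-bead molecule whose
# second bead has been wrapped to the far side of a 10 x 10 x 10 box.
def _demo_fixCoords():
    box = np.array([10., 10., 10.])
    pos = np.array([0.5, 0., 0.,   # bead 0, the reference position
                    9.5, 0., 0.])  # bead 1, wrapped image of x = -0.5
    # expected result: [0.5, 0., 0., -0.5, 0., 0.]
    return fixCoords(pos, pos[0:3].copy(), box)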
class SnapSystem(object):
"""Class for running the full suite of analysis software """
def __init__(self, traj, ats, molno, cldict,
clfunc={'contact':conOptDistanceCython,
'optical':conOptDistanceCython,
'aligned':alignDistancesCython},
compairs=np.array([[0,6],[1,7],[2,8],[3,9],[4,10],[5,11]]),
atype=u'LS',ttotal=-1,tstart=0,tpr=None,outGro='conf',
het=False,typelist=None):
""" Initialize a full system of gsd snapshots over a trajectory.
Parameters
----------
traj: a gsd.hoomd trajectory or a gro or an xtc file name
ats: dictionary
the number of beads in a single molecule for each cluster type
molno: int
the number of molecules in the system
cldict: dictionary
keys are strings representing cluster types, ie contact, optical,
aligned. values are cutoffs
clfunc: dictionary
keys are strings representing cluster types. values are
functions for distance computation
compairs: numpy array
for finding COM of aromatics for aligned clusters
atype: label
referring to how the aromatic beads are labeled in the trajectory
ttotal: int
the total length of the trajectory to be studied
if -1, assume it is the same as the length of the provided
trajectory
tstart: int
timestep to start at, defaults to zero
(last timestep = tstart + ttotal)
tpr: string
name of tpr file, used only with xtc trajectory
outGro: string
            name of file to save individual gro files to
het: bool
whether the system is heterogeneous or not, False by default
Attributes
----------
mpi: bool
True if the system can run in MPI, false if it has to run serially
trajectory: gsd.hoomd trajectory
the trajectory of snapshots in the system
        ats: dict
            the number of beads per molecule for each cluster type
molno: int
the number of molecules in the system
cldict: dict
keys are strings representing cluster types, ie contact, optical,
aligned. values are cutoffs
clsnaps: list of lists of clusterSnapshots
a list for each cluster type in the dictionary
each list is the snapshot at each timestep of the appropriate
type. If mpi is True, then this list is padded with dummy clusters
with NaN positions to make Scatter work correctly.
atype = label
referring to how aromatic beads are labeled in the trajectory
typelist: list
list of different types of central beads, by default None
comm: MPI communicator
------
Raises
------
NotImplementedError:
- if traj isn't a hoomd trajectory or a file ending
in xtc or gro
- if self.mpi is set to true for non hoomd stuff
ValueError:
- if tpr is set to None with an xtc file
Notes
-----
Allows for MPI implementation of system if the size of the
MPI communicator is greater than 1 AND it's a gsd system rather than
an XTC one
"""
comm = MPI.COMM_WORLD
size = comm.Get_size()
rank = comm.Get_rank()
self.comm = comm
if size > 1:
self.mpi = True
else:
self.mpi = False
#pdb.set_trace()
if (type(traj) is not str) and (type(traj) is not gsd.hoomd.HOOMDTrajectory):
raise NotImplementedError("Invalid trajectory type")
        if (type(traj) is not gsd.hoomd.HOOMDTrajectory):
            if self.mpi:
                raise NotImplementedError("MPI is only available for HOOMD trajectory types")
if (type(traj) is str):
spl = traj.split('.')
ext = spl[len(spl)-1]
if ext != 'gro' and ext != 'xtc':
raise NotImplementedError("Invalid trajectory type")
if ext == 'xtc' and tpr is None:
raise ValueError("tpr must have a value for xtc trajectories")
self.trajectory = traj
self.ats = ats
self.molno = molno
self.cldict = cldict
self.clfunc = clfunc
self.clsnaps = {}
self.atype = atype
if ttotal == -1:
ttotal = len(traj)
if self.mpi:
rank = comm.Get_rank()
num = int(np.floor(ttotal / size))
rem = ttotal % size
if rank == 0:
tslist = np.zeros((num + 1) * size).astype(int)
currid = 0
for r in range(size):
if rem != 0:
if r < rem:
ts = r * (num + 1) + np.arange(num + 1) + tstart
tslist[currid:(len(ts)+currid)] = ts
else:
ts = r * (num + 1) - (r - rem) + np.arange(num) + tstart
tslist[currid:(len(ts)+currid)] = ts
tslist[(len(ts)+currid):(len(ts) \
+ currid + (r-rem)+1)] = -1
currid += num + 1
else:
tslist = np.arange(num * size) + tstart
for ctype in cldict.keys():
if ctype == 'contact':
clusters = [ContactClusterSnapshot(t,traj,ats[ctype],
molno) \
for t in tslist]
elif ctype == 'optical':
clusters = [OpticalClusterSnapshot(t,traj,ats[ctype],
molno,
atype=atype) \
for t in tslist]
elif ctype == 'aligned':
clusters = [AlignedClusterSnapshot(t,traj,ats[ctype],
molno,
compairs=compairs,
atype=atype) \
for t in tslist]
else:
raise NotImplementedError("Unknown cluster type")
self.clsnaps[ctype] = clusters
else:
for ctype in cldict.keys():
if ctype == 'contact':
if type(traj) is str:
if het:
raise NotImplementedError("No code instantiated for heterogeneous system in Gromacs yet.")
if ext == 'gro':
clusters = [ContactClusterSnapshotXTC(t, traj, ats,
molno) \
for t in range(tstart,ttotal+tstart)]
else:
#pdb.set_trace()
flag = False
for t in range(tstart,ttotal+tstart):
if not os.path.isfile(outGro+str(t)+'.gro'):
flag = True
break
if flag:
grocall = \
'echo 0 | trjconv -s {0} -f {1} -o {2}.gro -sep'.format(tpr,traj,outGro)
os.system(grocall)
clusters = [ContactClusterSnapshotXTC(t,
outGro+str(t)+'.gro',ats,molno) \
for t in range(tstart,ttotal+tstart)]
else:
if het:
clusters = [ContactClusterHeteroSnapshot(t,traj,
ats[ctype],
molno,
typelist)\
for t in range(tstart,ttotal+tstart)]
else:
clusters = \
[ContactClusterSnapshot(t,traj,ats[ctype],molno) \
for t in range(tstart,ttotal+tstart)]
elif ctype == 'optical':
if type(traj) is str:
if het:
raise NotImplementedError("No heterogeneous implementation for Gromacs trajectories yet.")
if ext == 'gro':
clusters = [OpticalClusterSnapshotXTC(t,traj,ats,
molno,compairs) \
for t in range(tstart,ttotal+tstart)]
else:
flag = False
for t in range(tstart,ttotal+tstart):
if not os.path.isfile(outGro+str(t)+'.gro'):
flag = True
break
if flag:
grocall = \
'echo 0 | trjconv -s {0} -f {1} -o {2}.gro -sep'.format(tpr,traj,outGro)
os.system(grocall)
clusters = [OpticalClusterSnapshotXTC(t,
outGro+str(t)+'.gro',ats,molno,
compairs) \
for t in range(tstart,ttotal+tstart)]
else:
if het:
clusters = [OpticalClusterHeteroSnapshot(t,traj,
ats[ctype],
molno,
typelist,
atype=atype) \
for t in range(tstart,ttotal+tstart)]
else:
clusters = \
[OpticalClusterSnapshot(t,traj,ats[ctype],molno,
atype=atype) \
for t in range(tstart,ttotal+tstart)]
elif ctype == 'aligned':
if type(traj) is str:
raise NotImplementedError("Aligned cluster only available for HOOMD type trajectories")
else:
clusters = \
[AlignedClusterSnapshot(t,traj,ats[ctype],molno,
compairs=compairs,
atype=atype) \
for t in range(tstart,ttotal+tstart)]
else:
raise NotImplementedError("Unknown cluster type")
self.clsnaps[ctype] = clusters
def get_clusters_mpi(self,ctype,ttotal=-1):
""" Compute the clusters in each snapshot of the trajectory, using
MPI parallelization.
Parameters
----------
ctype: string
cluster type (contact, optical, aligned, etc)
ttotal: int
number of timesteps to compute for
Raises
------
NotImplementedError
If the cluster type isn't one that's been programmed yet.
Notes
------
Partition up the snapshots, turn them into numpy arrays of
relevant data, and have each processor compute the cluster IDs,
which is the only step that takes ages right now. Once computed,
gather them all back up at root.
"""
rank = self.comm.Get_rank()
size = self.comm.Get_size()
if ttotal == -1:
ttotal = len(self.trajectory)
num = int(np.floor(ttotal / size))
rem = ttotal % size
traj = self.trajectory
ats = self.ats
molno = self.molno
atype = self.atype
if ctype not in ['contact','optical','aligned']:
raise NotImplementedError('Unknown cluster type \
in get_clusters_mpi')
cutoff = self.cldict[ctype]
if rank == 0:
clusters = self.clsnaps[ctype]
carraylen = clusters[0].getCArrayLen()
clusterarray = np.zeros(carraylen * len(clusters))
cind = 0
for cls in clusters:
carray = cls.toArray()
clusterarray[(cind * carraylen):(cind * carraylen + carraylen)]\
= carray
cind += 1
else:
if ctype == 'contact':
tCSnap = ContactClusterSnapshot(0,traj,ats[ctype],molno)
elif ctype == 'optical':
tCSnap = OpticalClusterSnapshot(0,traj,ats[ctype],molno,
atype=atype)
elif ctype == 'aligned':
tCSnap = AlignedClusterSnapshot(0,traj,ats[ctype],molno,
atype=atype)
else:
tCSnap = ClusterSnapshot(0,traj,ats)
carraylen = tCSnap.getCArrayLen()
clusterarray = None
if rem == 0:
ncsnaps = num
else:
ncsnaps = num + 1
carray_local = np.zeros(ncsnaps * carraylen)
self.comm.Scatter(clusterarray,carray_local,root=0)
#for each local cluster array, turn it into a cluster, compute the
#clusterIDs, pack the whole thing up as an array again, and send back
#to root
for i in range(ncsnaps):
carrayi = carray_local[carraylen * i : (carraylen * i + carraylen)]
#print("From rank {0}, snap {1}, array{2}".format(rank,i,carrayi))
if not np.isnan(carrayi[4]):
if ctype == 'contact':
clustSnap = ContactClusterSnapshot(0,carrayi,ats[ctype],
molno)
elif ctype == 'optical':
clustSnap = OpticalClusterSnapshot(0,carrayi,ats[ctype],
molno,atype=atype)
elif ctype == 'aligned':
clustSnap = AlignedClusterSnapshot(0,carrayi,ats[ctype],
molno,atype=atype)
clustSnap.setClusterID(cutoff)
try:
carray_local[carraylen * i : (carraylen * i + carraylen)]\
= clustSnap.toArray()
except:
pdb.set_trace()
#print("Part 2: From rank {0}, snap {1}, array{2}".format(rank,i,carrayi))
self.comm.Barrier()
self.comm.Gather(carray_local,clusterarray,root=0)
if rank == 0:
ind = 0
nind = 0
while ind < ttotal:
carrayi = clusterarray[(carraylen * nind) : \
(carraylen * nind + carraylen)]
if not np.isnan(carrayi[4]):
if ctype == 'contact':
clustSnap = ContactClusterSnapshot(0,carrayi,
ats[ctype],molno)
elif ctype == 'optical':
clustSnap = OpticalClusterSnapshot(0,carrayi,
ats[ctype],molno,
atype=atype)
elif ctype == 'aligned':
clustSnap = AlignedClusterSnapshot(0,carrayi,
ats[ctype],molno,
atype=atype)
self.clsnaps[ctype][nind].clusterIDs = clustSnap.clusterIDs
#print("current pos: ",clustSnap.pos[0])
#print("current csizes: ",clustSnap.idsToSizes())
ind += 1
nind +=1
def get_clusters_serial(self,ctype,box,lcompute=None):
""" Compute the clusters in each snapshot of the trajectory, doing
so simply in serial.
Parameters
----------
ctype: string
cluster type (contact, optical, aligned, etc)
box: 3 x 1 numpy array
box side lengths
lcompute: string or None
if a string, this is the filename to write the length distributions
to after computation
Raises
------
NotImplementedError
If the cluster type isn't one that's been programmed yet.
"""
if ctype not in self.cldict.keys():
raise NotImplementedError("Unknown cluster type \
in get_clusters_serial.")
clusters = self.clsnaps[ctype]
cutoff = self.cldict[ctype]
func = self.clfunc[ctype]
if lcompute is not None:
lfile = open(lcompute,'w')
for clustSnap in clusters:
BT = clustSnap.setClusterID(cutoff)
if lcompute is not None:
ldistrib = clustSnap.getLengthDistribution(cutoff,box,func,
BT=BT)
for lmol in ldistrib:
lfile.write('{0} '.format(lmol))
lfile.write('\n')
if lcompute is not None:
lfile.close()
self.clsnaps[ctype] = clusters
def get_clusters_from_file(self,ctype,fname):
""" Compute the clusters in each snapshot of the trajectory from a
given file name, assuming serial.
Parameters
----------
ctype: string
cluster type (contact, optical, aligned, etc)
fname: string
file name where the cluster ID data is saved
Raises
------
NotImplementedError
If the cluster type isn't one that's been programmed yet
"""
if ctype not in self.cldict.keys():
raise NotImplementedError("Unknown cluster type \
in get_clusters_from_file.")
clusters = self.clsnaps[ctype]
for clustSnap in clusters:
clustSnap.setClusterIDFromFile(fname)
self.clsnaps[ctype] = clusters
def getLengthDistribution(self,ctype,cutoff,box,func=conOptDistanceCython,writegsd=None,
writeldistrib=None):
""" Gets the length distribution at each timestep and optionally
writes it out to file.
Parameters
----------
ctype: string
cluster type (contact, optical, aligned, etc)
cutoff: float
Cutoff for BallTree computation for unwrapping
box: 1x3 numpy array
box side lengths
func: python function
distance metric for BallTree computation
writegsd: string or None
used as the base filename to write out all clusters as separate
gsd files. Mostly useful for debugging purposes.
writeldistrib: string or None
the filename to write out the length distributions of the clusters
Returns
-------
ldistribt: T x molno numpy array
contains the approximate end-end length that the cluster each
molecule participates in is at each timestep
Raises
------
NotImplementedError
If the cluster type isn't one that's been programmed yet
Notes
-----
Computes an approximation to the end-end length as the largest
distance between two participating COM beads. This is not the
best approximation if the aggregates are not very linear or if
they are linear but curl up a lot. It fails for a spanning cluster.
"""
if ctype not in self.cldict.keys():
raise NotImplementedError("Unknown cluster type \
in get_clusters_from_file.")
clusters = self.clsnaps[ctype]
ldistribt = np.zeros([len(self.trajectory),self.molno])
ind = 0
if writeldistrib is not None:
f = open(writeldistrib,'w')
for clustSnap in clusters:
ldistrib = clustSnap.getLengthDistribution(cutoff,box,func,
writegsd=writegsd)
ldistribt[ind,:] = ldistrib
if writeldistrib is not None:
for endendl in ldistrib:
f.write('{0} '.format(endendl))
f.write('\n')
ind += 1
if writeldistrib is not None:
f.close()
return ldistribt
def getMassAvVsTime(self,ctype,tstep=1):
""" Returns a numpy array of two columns, with time on the left and
mass-averaged cluster size on the right.
Parameters
----------
ctype: string
refers to cluster type for which to calculate this
tstep: float
converts timestep to some non-reduced value if desired
default is just 1
Returns
-------
mu2vtime: numpy array
Raises
------
NotImplementedError
If the cluster type is one that hasn't been programmed yet
"""
if self.comm.Get_rank() == 0:
if ctype not in self.cldict.keys():
raise NotImplementedError("Unknown cluster type.")
clsnaps = self.clsnaps[ctype]
mu2vtime = float('NaN')*np.ones([2,len(clsnaps)])
ind = 0
for clsnap in clsnaps:
if not np.isnan(clsnap.pos[0][0]):
mu2vtime[0,ind] = ind * tstep
mu2vtime[1,ind] = massAvSize(clsnap.idsToSizes())
ind += 1
m1 = mu2vtime[0,np.where(~np.isnan(mu2vtime[0]))[0]]
m2 = mu2vtime[1,np.where(~np.isnan(mu2vtime[0]))[0]]
mu2vtime = np.array([m1,m2])
return mu2vtime
def writeCIDs(self,ctype,fname):
""" Write out the cluster indices as a file that can be opened
and loaded later
Parameters
----------
ctype: string
cluster type
fname: string
file name to write to
Raises
------
NotImplementedError
If the cluster type is one that hasn't been programmed yet
"""
if ctype not in self.clsnaps.keys():
raise NotImplementedError("Unknown cluster type in writeCIDs.")
if self.comm.Get_rank() == 0:
fid = open(fname,'w')
clsnaps = self.clsnaps[ctype]
for clsnap in clsnaps:
if not np.isnan(clsnap.pos[0][0]):
cids = clsnap.clusterIDs
for cid in cids:
fid.write('{0} '.format(cid))
fid.write('\n')
fid.close()
def writeSizes(self,ctype,fname):
""" Write out the cluster sizes as a file that can be opened
and loaded later
Parameters
----------
ctype: string
cluster type
fname: string
file name to write to
Raises
------
NotImplementedError
If the cluster type is one that hasn't been programmed yet
"""
if ctype not in self.clsnaps.keys():
raise NotImplementedError("Unknown cluster type in writeSizes")
if self.comm.Get_rank() == 0:
fid = open(fname,'w')
clsnaps = self.clsnaps[ctype]
for clsnap in clsnaps:
if not np.isnan(clsnap.pos[0][0]):
csizes = clsnap.idsToSizes()
for csize in csizes:
fid.write('{0} '.format(csize))
fid.write('\n')
fid.close()
def writeIntermix(self,ctype,fname,returnmat=False):
""" Write out the intermix values as a file that can be opened
and loaded later
----------
Parameters
----------
ctype: string
cluster type
fname:
file name to write to
returnmat: bool
whether to create and return a matrix of these values, useful for
debugging or further data analysis in python, default False
-------
Returns
-------
intermixlist:
list with T entries of arrays of varying size
------
Raises
------
NotImplementedError
if the cluster type is one that hasn't been programmed yet
-----
Notes
-----
        writes R lines to file,
        where R = sum_t C(t), and C(t) is the number of clusters at
        time step t
column 1 is the timestep
column 2 is the cluster size
column 3 is the number of bonds between similar types
column 4 is the number of bonds between different types
column 5 is the number of total bonds (sum of 3 and 4)
"""
intermixlist = []
if ctype not in self.clsnaps.keys():
raise NotImplementedError("Unknown cluster type in intermix")
if self.comm.Get_rank() == 0:
fid = open(fname,'w')
clsnaps = self.clsnaps[ctype]
t = 0
for clsnap in clsnaps:
intermix = clsnap.getIntermixByCluster()
if returnmat:
intermixlist.append(intermix)
for c in range(np.shape(intermix)[0]):
fid.write("{0}\t{1}\t{2}\t{3}\t{4}\n".format(t,
intermix[c,0],intermix[c,1],intermix[c,2],
intermix[c,3]))
t += 1
fid.close()
return intermixlist
def writeAngSpread(self,ctype,fname,ainds):
"""
Write to file the angle spread data for each (t,c) pair where t is
the timestep and c is the cluster size
----------
Parameters
----------
ctype: string
cluster type
fname: string
name of file to write to
ainds: list of ints
the indices of the aromatic beads
------
Raises
------
NotImplementedError:
if the cluster type is unknown
"""
if ctype not in self.clsnaps.keys():
raise NotImplementedError("Unknown cluster type in angspread")
cutoff = self.cldict[ctype]
if self.comm.Get_rank() == 0:
fid = open(fname,'w')
clsnaps = self.clsnaps[ctype]
for clsnap in clsnaps:
angspreadmat = clsnap.angSpread(cutoff,ainds)
for i in range(np.shape(angspreadmat)[0]):
fid.write('{0}\t{1}\t{2}\t{3}\n'.format(clsnap.timestep,
angspreadmat[i,0],angspreadmat[i,1],
angspreadmat[i,2]))
fid.close()
def writeNNAngSpread(self,ctype,fname,ainds):
"""
Write to file the NN angle data for each (t,c) pair where t is
the timestep and c is the cluster size
----------
Parameters
----------
ctype: string
cluster type
fname: string
name of file to write to
ainds: list of ints
the indices of the aromatic beads
------
Raises
------
NotImplementedError:
if the cluster type is unknown
"""
if ctype not in self.clsnaps.keys():
raise NotImplementedError("Unknown cluster type in angspread")
cutoff = self.cldict[ctype]
if self.comm.Get_rank() == 0:
fid = open(fname,'w')
clsnaps = self.clsnaps[ctype]
for clsnap in clsnaps:
angspreadmat = clsnap.nnangSpread(ainds)
for i in range(np.shape(angspreadmat)[0]):
fid.write('{0}\t{1}\t{2}\t{3}\n'.format(clsnap.timestep,
angspreadmat[i,0],angspreadmat[i,1],
angspreadmat[i,2]))
fid.close()
class ClusterSnapshot(object):
"""Class for tracking the location of clusters at each time step"""
def __init__(self, t, traj, ats):
""" Initialize a ClusterSnapshot object.
Parameters
----------
t: timestep
traj: a gsd.hoomd trajectory
ats: the number of beads in a single molecule
Raises
------
RuntimeError
if the number of particles doesn't divide evenly into molecules
"""
snapshot = traj[t]
self.timestep = t
self.ats = ats
binds = np.argsort(snapshot.particles.body)
self.pos = snapshot.particles.position[binds]
sz = np.shape(self.pos)
if sz[0] % ats != 0:
raise RuntimeError("Number of particles not divisible by number \
of beads per molecules.")
self.nclusts = ats
        self.clusterIDs = np.zeros(int(sz[0] / ats))
class ContactClusterSnapshot(ClusterSnapshot):
"""Class for tracking the location of contact clusters at each time step
Attributes
----------
timestep: float
timestep
ats: int
number of beads per molecule
nclusts: int
number of clusters in the snapshot
pos: numpy array [M x 3*ats]
locations of molecules and beads within molecules
each molecule is its own line and then the locations of beads
are flattened within that
clusterIDs: list [len M]
box: 1 x 6 numpy array
defines triclinic box
"""
def __init__(self, t, trajectory, ats, molno):
""" Initialize a ClusterSnapshot object.
Parameters
----------
t: timestep
trajectory: gsd.hoomd trajectory or numpy array
numpy array is of size 4 + 3 * ats * molno
(ats is different for optical and aligned clusters)
ats: the number of beads in a single molecule
molno: the number of molecules in the system
Raises
------
RuntimeError
if the number of particles does not divide evenly up into molecules
Notes
-----
You can create a ClusterSnapshot object from either an array (for use
with MPI) or from a HOOMD trajectory
"""
self.timestep = t
self.ats = ats
self.box = None
self.rng = None
if type(trajectory) is np.ndarray:
carray = trajectory
self.timestep = int(carray[0])
self.ats = int(carray[2])
self.nclusts = carray[1]
pend = 4 + 3 * ats * molno
self.pos = np.reshape(carray[4:pend],[molno,3*ats])
self.clusterIDs = carray[pend:len(carray)]
else:
if t != -1:
snapshot = trajectory[t]
self.box = snapshot.configuration.box
binds = np.argsort(snapshot.particles.body)
self.pos = snapshot.particles.position[binds]
sz = np.shape(self.pos)
if sz[0] % ats != 0:
raise RuntimeError("Number of particles not divisible by \
number of beads per molecules.")
#pdb.set_trace()
                self.pos = np.reshape(self.pos, [int(sz[0] / ats), 3 * ats])
else:#create a dummy object to help with mpi scattering
snapshot = trajectory[0]
self.pos = snapshot.particles.position
sz = np.shape(self.pos)
            self.pos = np.reshape(self.pos, [int(sz[0] / ats), 3 * ats])
self.pos = float('NaN') * self.pos
self.nclusts = molno
self.clusterIDs = range(int(sz[0] / ats))
def getCArrayLen(self):
"""
returns the numpy length of an array made by toArray
Returns
-------
carrayLen: int that is the length of the numpy array made by the
toArray fcn
"""
sz = np.shape(self.pos)
molno = sz[0]
carrayLen = 4 + 3 * self.ats * molno + molno
return carrayLen
def toArray(self):
"""
Put all the cluster information into a numpy array, for use with
mpi4py
Returns
-------
carray: numpy array containing all information in a specific order
Can be turned back into a cluster by calling the arrayToCluster
function in this module
Notes
-----
Contains:
[timestep (size 1) nclusts (size 1) ats (size 1)
positions (size 3 * ats * molno) clusterIDs (size molno)]
"""
sz = np.shape(self.pos)
carray = np.zeros(4 + 3 * self.ats * sz[0] + sz[0])
carray[0] = self.timestep
carray[1] = self.nclusts
carray[2] = self.ats
molno = sz[0]
carray[3] = molno
pend = 4 + 3 * self.ats * molno
carray[4:pend] = np.reshape(self.pos,[1,3*self.ats*molno])
clen = molno
carray[pend:(pend + clen)] = self.clusterIDs
return carray
def setClusterID(self,cutoff):
"""
Set the cluster IDs using getClusterID
Parameters
----------
cutoff: the squared distance molecules have to be within to be
part of the same cluster
Returns
-------
BT: BallTree of the system
"""
(nclusts,clusterIDs,BT) = \
self.getClusterID(self.pos,cutoff,conOptDistanceCython)
self.nclusts = nclusts
self.clusterIDs = clusterIDs
return BT
def setClusterIDFromFile(self,fname,line=None):
"""
Set the cluster IDs by opening a file and checking what they are
Parameters
----------
fname: string
the name of the file that contains the clusterIDs
line: int
the line number if it differs from the timestep of the cluster snap
Returns
-------
None, just sets clusterIDs
Notes
-----
File format is as written out by this code package
"""
f = open(fname)
lines = f.readlines()
f.close()
if line is None:
line = self.timestep
cIDs = lines[line].split()
self.clusterIDs = np.array([int(float(cID)) for cID in cIDs])
def getClusterID(self, positions,cutoff,func):
"""
Find the ID of which cluster each molecule is in
Parameters
----------
        positions: numpy array
            positions of the molecules, one row per molecule
        cutoff: the squared distance molecules have to be within to be
            part of the same cluster
        func: python function
            distance metric (accepted for interface compatibility; this
            implementation uses a Euclidean BallTree directly)
Returns
-------
clusterIDs: numpy array of the cluster index of the cluster that
each molecule occupies
nclusts: number of clusters
BT: BallTree for possible other computations
"""
sz = np.shape(positions)
pos3 = positions.reshape((int(sz[0]*sz[1]/3),3))
BT = BallTree(pos3,metric='euclidean')
rng = radius_neighbors_graph(BT,np.sqrt(cutoff))
rng = squashRNGCOOCython(rng,int(sz[1]/3))
self.rng = rng
(nclusts,clusterIDs) = connected_components(rng,directed=False,
return_labels=True,
connection='weak')
#pdb.set_trace()
return (nclusts,clusterIDs,BT)
def idsToSizes(self):
"""
Takes the cluster IDs and returns a list that for each molecule
gives the size of the cluster it is participating in
Returns
-------
clustSizes: numpy array
"""
clustSizes = np.arange(len(self.clusterIDs))
u,counts = np.unique(self.clusterIDs,return_counts=True)
dcounts = dict(zip(u,counts))
for cid in range(len(self.clusterIDs)):
clustSizes[cid] = dcounts[self.clusterIDs[cid]]
return clustSizes
def fixPBC(self,cID,cutoff,writegsd=None,BT=None):
"""
return positions for a particular cluster fixed across PBCs for
calculation of structural metrics like end-to-end length
Parameters
----------
cID: int
the cluster index for this particular cluster
cutoff: float
distance within which to search for neighbors
        writegsd: string or None
            if not None, write out a gsd file to this name that shows the
            resultant cluster
BT: precomputed BallTree for cluster
if this is none, recompute the BallTree
Returns
-------
pos: numpy array of floats
the resultant positions of the cluster
Raises
------
RuntimeError: if there is more than one connected component
Notes
-----
Currently origin is in the center of the box; for these purposes,
all positions are reset such that the origin is at the corner.
"""
inds = np.where(self.clusterIDs==cID)[0]
positions = self.pos[inds,:]
sz = np.shape(positions)
box = self.box[0:3]
fixedXYZ = positions.copy()
        potInds = list(range(1, int(sz[0])))
#if BT is None:
BT = BallTree(positions.reshape((int(sz[0]*sz[1]/3),3)),
metric='euclidean')
rng = radius_neighbors_graph(BT,np.sqrt(cutoff))
rng = squashRNGCOOCython(rng,int(sz[1]/3))
(nCC,CC) = connected_components(rng,connection='weak')
if nCC != 1:
raise RuntimeError("This isn't a fully connected cluster.")
fixedXYZ[0,:] = fixCoords(fixedXYZ[0,:].copy(),fixedXYZ[0,0:3].copy(),
box)
correctInds = [0]
while len(correctInds) > 0:
mol = correctInds.pop()
#neighs = BT.query_radius(positions[mol,:].reshape(1,-1),r=cutoff)[0]
#neighs = neighs.remove(mol)
neighs = np.where(rng[mol,:].toarray()[0]==1)[0]
for n in neighs:
#pdb.set_trace()
if n in potInds:
potInds.remove(n)
correctInds.append(n)
fixedXYZ[n,:] = fixCoords(fixedXYZ[n,:].copy(),
fixedXYZ[mol,0:3].copy(),box)
else:
continue
if writegsd is not None:
f = gsd.hoomd.open(writegsd,'wb')
s = gsd.hoomd.Snapshot()
            s.particles.N = int(sz[0] * sz[1] / 3)
s.particles.position = fixedXYZ
s.configuration.box = np.concatenate((box,[0,0,0]))
f.append(s)
return fixedXYZ
def getLengthDistribution(self,cutoff,box,func=conOptDistanceCython,
writegsd=None,BT=None):
""" Finds the end-to-end cluster length distribution
Parameters
----------
cutoff: float
Cutoff for BallTree computation for unwrapping
box: 1x3 numpy array
box side lengths
func: python function
distance metric for BallTree computation
writegsd: string or None
used as the base filename to write out all clusters as separate
gsd files. Mostly useful for debugging purposes.
BT: None or BallTree
BallTree for cluster computation
Recomputes if None
Returns
-------
ldistrib: 1 x molno numpy array
length of the cluster each molecule belongs to
"""
ldistrib = np.zeros(len(self.pos))
for cID in range(self.nclusts):
inds = np.where(self.clusterIDs==cID)[0]
if len(inds) > 1:
if writegsd is not None:
cIDpos = self.fixPBC(cID,cutoff,
writegsd=writegsd+str(cID)+'.gsd',
BT=BT)
else:
cIDpos = self.fixPBC(cID,cutoff,BT=BT)
sz = np.shape(cIDpos)
#extract COM positions
xcom = np.sum(cIDpos[:,range(0,sz[1],3)],axis=1)/(sz[1]/3.)
ycom = np.sum(cIDpos[:,range(1,sz[1],3)],axis=1)/(sz[1]/3.)
zcom = np.sum(cIDpos[:,range(2,sz[1],3)],axis=1)/(sz[1]/3.)
cIDposcom = np.array([xcom,ycom,zcom])
endendl = np.sqrt(max(pdist(cIDposcom.transpose(),metric='sqeuclidean')))
ldistrib[inds] = endendl
return ldistrib
def gyrTensPy(self,posList):
"""
Function that computes the gyration tensor of a subset of atoms
in a cluster
----------
Parameters
----------
posList: numpy array
list of positions involved in computation
------
Raises
------
ValueError
if posList doesn't divide evenly by three
"""
if len(posList) % 3 !=0:
raise ValueError("position list should divide evenly by 3")
gT = np.zeros([3,3])
for i in range(3):
for j in range(3):
gT[i][j] = gyrTensxyCy(posList,i,j,self.box[i],self.box[j])
return gT
def gyrTensxy(self,posList,x,y,boxlx,boxly):
"""
Compute in Python the gyration tensor entry at position (x,y)
----------
Parameters
----------
posList: numpy array
the positions of the relevant beads/atoms
x: int
position 1 in tensor
y: int
position 2 in tensor
boxlx: float
box length in first direction
boxly: float
box length in second direction
-------
Returns
-------
gxy: float
gyration tensor entry G[x,y]
------
Raises
------
ValueError
if posList doesn't divide evenly by three
"""
gxy = 0
if len(posList) % 3 !=0:
raise ValueError("position list should divide evenly by 3")
N = int(len(posList)/3)
for R in range(N):
for S in range(N):
V = posList[3*R+x]-posList[3*S+x]
V = V - boxlx*round(V/boxlx)
U = posList[3*R+y]-posList[3*S+y]
U = U - boxly*round(U/boxly)
#print U,V
gxy = gxy + V*U
gxy = gxy/(2*N**2)
return gxy
def gyrPrinc(self,aposFix):
"""
find and return the principal component of the gyration tensor
----------
Parameters
----------
aposFix: numpy array
list of relevant positions
-------
Returns
-------
g1: 1 x 3 numpy array
the principal eigenvector corresponding to the first principal
moment of the gyration tensor
"""
gTens = self.gyrTensPy(aposFix)
(eigval,eigvec) = np.linalg.eig(gTens)
eigOrder = np.argsort(-1*eigval)
eigvec = eigvec[:,eigOrder]
g1 = eigvec[:,0]
return g1
def angSpread(self,cutoff,ainds,tol=1e-16):
"""
        For each cluster, compute the spread of the aromatic-group angles
        perpendicular to the principal axis of the cluster's gyration tensor,
        where the gyration tensor is computed from the aromatics but not
        the side chains
----------
Parameters
----------
cutoff: float
cutoff within which to search for neighbors when unwrapping cluster
ainds: list of ints
the indices of the beads/atoms participating in the aromatics
tol: float
defaults to 1e-16
the tolerance below which we assume there is no perpendicular
component and assign an angle of 0
-------
Returns
-------
angSpreadMat: C x 3 numpy array
C = number of clusters
col0 = size of cluster
col1 = spread of perpendicular angles
col2 = average length of projection onto the gyration tensor
------
Raises
------
ValueError:
if the argument to arccos is > 1 or NaN
-----
Notes
-----
Angles can only be between 0 and pi/2 due to vector restriction
"""
uCIDs = np.unique(self.clusterIDs)
angSpreadMat = np.zeros((len(uCIDs),3))
tinds = np.zeros(3*len(ainds)).astype(int)
ind = 0
for aind in ainds:
aind3 = np.arange(3*aind,(3*aind+3))
tinds[ind:ind+3] = aind3
ind += 3
ainds = tinds
for clustID in uCIDs:
csize = len(np.argwhere(self.clusterIDs == clustID))
angSpreadMat[clustID,0] = csize
posFix = self.fixPBC(clustID,cutoff)
aposFix = posFix[:,ainds]
gclust1 = self.gyrPrinc(aposFix.reshape(np.shape(aposFix)[0] \
* np.shape(aposFix)[1]))
ngclust1 = np.linalg.norm(gclust1)
rperpn0 = 0.
i0 = 0
skipflag = False
while rperpn0 < tol:
gmol0 = self.gyrPrinc(aposFix[i0,:])
dgr0 = np.dot(gclust1,gmol0)
rperp0 = gmol0 - dgr0*gclust1
rperpn0 = np.linalg.norm(rperp0)
i0 += 1
if i0 == np.shape(aposFix)[0]:
angSpreadMat[clustID,1] = 0.
angSpreadMat[clustID,2] = 1.
skipflag = True
break
if not skipflag:
rperph0 = rperp0 / np.linalg.norm(rperp0)
thlsmat = np.zeros((csize,2))
for mol in range(0,np.shape(aposFix)[0]):
gmol = self.gyrPrinc(aposFix[mol,:])
dgr = np.dot(gclust1,gmol)
lpar = dgr / ngclust1
rperp = gmol - dgr*gclust1
rperpn = np.linalg.norm(rperp)
if rperpn > tol:
rperph = rperp / np.linalg.norm(rperp)
drperp = np.abs(np.dot(rperph,rperph0))
if drperp > 1 and drperp < 1 + 10*tol:
drperp = 1.
                        if drperp > 1. or np.isnan(drperp):
#pdb.set_trace()
raise ValueError("Cannot take arccos of value > 1")
thperp = np.arccos(drperp)
else:
thperp = 0.
thlsmat[mol,0] = thperp
thlsmat[mol,1] = lpar
#pdb.set_trace()
#pdb.set_trace()
if csize == 1:
ddof = 0
else:
ddof = 1
angSpreadMat[clustID,1] = np.std(thlsmat[:,0],ddof=ddof)
angSpreadMat[clustID,2] = np.mean(thlsmat[:,1])
return angSpreadMat
def nnangSpread(self,ainds,tol=1e-16):
"""
For each bond, check the angle between the two participating molecules
----------
Parameters
----------
ainds: indices of aromatic positions
tol: float
tolerance for numerical imprecision
-------
Returns
-------
angspreadMat: C x 3 numpy array
col 1 is cluster size
col 2 is the mean angle between bonded molecules
col 3 is the stddev between bonded molecules
------
Raises
------
NotImplementedError:
if rng is still set to None or clusterIDs are still set to -1
ValueError:
if you try to take the arccos of a value > 1
"""
if self.rng is None:
raise NotImplementedError("Must set adjacency matrix first")
if self.clusterIDs[0] == -1:
raise NotImplementedError("Must set cluster IDs first")
tinds = np.zeros(3*len(ainds)).astype(int)
ind = 0
for aind in ainds:
aind3 = np.arange(3*aind,(3*aind+3))
tinds[ind:ind+3] = aind3
ind += 3
ainds = tinds
binds = getIndsCsr(self.rng)
angspreadMat = np.zeros((self.nclusts,3))
m = np.shape(binds)[0]
if m == 0:
return np.array([])
thidMat = np.zeros((np.shape(binds)[0],2))
#pdb.set_trace()
for i in range(m):
bi = int(binds[i,0])
bj = int(binds[i,1])
#pdb.set_trace()
pi = self.pos[bi,:]
pj = self.pos[bj,:]
pi = pi[ainds]
pj = pj[ainds]
gi = self.gyrPrinc(pi)
gj = self.gyrPrinc(pj)
dij = abs(np.dot(gi,gj))
if dij > 1 and dij < 1 + 10*tol:
dij = 1.
            if dij > 1. or np.isnan(dij):
raise ValueError("Cannot take arccos of value > 1")
thij = np.arccos(dij)
thidMat[i,1] = thij
thidMat[i,0] = self.clusterIDs[bi]
for cid in self.clusterIDs:
csize = len(np.argwhere(self.clusterIDs==cid))
thinds = np.argwhere(thidMat[:,0] == cid)
angspreadMat[cid,0] = csize
if csize >= 2:
angspreadMat[cid,1] = np.mean(thidMat[thinds,1])
if csize == 2:
ddof = 0
else:
ddof = 1
angspreadMat[cid,2] = np.std(thidMat[thinds,1],ddof=ddof)
#pdb.set_trace()
return angspreadMat
def getIntermixByCluster(self):
"""
Figure out the number of bonds between same types, the number of
bonds between different types, and the total number of bonds for
each cluster
-------
Returns
-------
nbs: C x 4 numpy array of ints
col 1 is cluster size
col 2 is the number of connections between like molecules
col 3 is number of connections between unlike molecules
col 4 is number of total connections between molecules
------
Raises
------
        NotImplementedError: if rng is still set to None or clusterIDs are
still set to -1
-----
Notes
-----
* should double-check for whether we need to divide by two for
symmetric bonds
"""
if self.rng is None:
raise NotImplementedError("Must set adjacency matrix first")
if self.clusterIDs[0] == -1:
raise NotImplementedError("Must set cluster IDs first")
binds = getIndsCsr(self.rng)
nbs = np.zeros((self.nclusts,4)).astype(int)
m = np.shape(binds)[0]
for i in range(m):
bi = binds[i,0]
bj = binds[i,1]
ti = self.typeIDs[bi]
tj = self.typeIDs[bj]
if ti == tj:
nbs[self.clusterIDs[bi],1] += 1
else:
nbs[self.clusterIDs[bi],2] += 1
nbs[self.clusterIDs[bi],3] += 1
#if self.timestep > 0:
# pdb.set_trace()
for i in range(self.nclusts):
nbs[i,0] = len(np.argwhere(self.clusterIDs==i))
return nbs
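# Minimal usage sketch for ContactClusterSnapshot (file names and numbers
# are illustrative, not part of this module): build a snapshot from a HOOMD
# trajectory, label contact clusters with a squared-distance cutoff, and
# convert the labels into per-molecule cluster sizes.
def _example_contact_cluster_sizes(trajfile, t, ats, molno, cutoff2):
    traj = gsd.hoomd.open(trajfile, 'rb')
    snap = ContactClusterSnapshot(t, traj, ats, molno)
    snap.setClusterID(cutoff2)      # cutoff2 is a *squared* distance
    return snap.idsToSizes()        # size of the cluster each molecule is in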
class ContactClusterHeteroSnapshot(ContactClusterSnapshot):
"""
Contact cluster that maintains indices of different molecule types.
Attributes
----------
timestep: float
timestep
ats: int
number of beads per molecule
nclusts: int
number of clusters in the snapshot
pos: numpy array [M x 3*ats]
locations of molecules and beads within molecules
each molecule is its own line and then the locations of beads
are flattened within that
clusterIDs: list [len M]
typeIDs: list [len M]
indices that indicate different molecule types
rng: csr matrix or None
adjacency matrix of clusters
"""
def __init__(self, t, trajectory, ats, molno,typelist):
""" Initialize a Heterogeneous Cluster Snapshot object.
Parameters
----------
t: timestep
trajectory: gsd.hoomd trajectory or numpy array
numpy array is of size 4 + 3 * ats * molno
(ats is different for optical and aligned clusters)
ats: the number of beads in a single molecule
molno: the number of molecules in the system
typelist: list
of central bead types in snapshot
Raises
------
RuntimeError
            if the number of particles does not divide evenly into molecules
"""
self.timestep = t
self.ats = ats
snapshot = trajectory[t]
self.box = snapshot.configuration.box
binds = np.argsort(snapshot.particles.body)
self.pos = snapshot.particles.position[binds]
sz = np.shape(self.pos)
if sz[0] % ats != 0:
            raise RuntimeError("Number of particles not divisible by \
                                number of beads per molecule.")
        #pdb.set_trace()
        self.pos = np.reshape(self.pos, [sz[0] // ats, 3 * ats])
self.nclusts = molno
self.clusterIDs = -1*np.ones(int(sz[0] / ats)).astype(int)
        m = sz[0] // ats
        self.typeIDs = np.zeros(m).astype(int)
        for tidx in range(len(typelist)):
            ti = snapshot.particles.types.index(typelist[tidx])
            tloc = np.argwhere(snapshot.particles.typeid == ti)
            self.typeIDs[snapshot.particles.body[tloc]] = ti
self.rng = None
def getClusterID(self, positions,cutoff,func):
"""
Find the ID of which cluster each molecule is in
Parameters
----------
        positions: numpy array
            positions of the molecules, one row per molecule
        cutoff: the squared distance molecules have to be within to be
            part of the same cluster
        func: python function
            distance metric (accepted for interface compatibility; this
            implementation uses a Euclidean BallTree directly)
Returns
-------
clusterIDs: numpy array of the cluster index of the cluster that
each molecule occupies
nclusts: number of clusters
BT: BallTree for possible other computations
"""
sz = np.shape(positions)
pos3 = positions.reshape((int(sz[0]*sz[1]/3),3))
BT = BallTree(pos3,metric='euclidean')
rng = radius_neighbors_graph(BT,np.sqrt(cutoff))
rng = squashRNGCOOCython(rng,int(sz[1]/3))
self.rng = rng
(nclusts,clusterIDs) = connected_components(rng,directed=False,
return_labels=True,
connection='weak')
#pdb.set_trace()
return (nclusts,clusterIDs,BT)
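# Sketch of the heterogeneous workflow (names are illustrative): typelist
# entries are central-bead type strings stored in the gsd file, e.g.
# [u'EA', u'EB'] as mentioned in the docstrings elsewhere in this module.
def _example_intermix_counts(trajfile, t, ats, molno, typelist, cutoff2):
    traj = gsd.hoomd.open(trajfile, 'rb')
    snap = ContactClusterHeteroSnapshot(t, traj, ats, molno, typelist)
    snap.setClusterID(cutoff2)      # also sets snap.rng, needed below
    # columns: cluster size, like-like bonds, like-unlike bonds, total bonds
    return snap.getIntermixByCluster()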
class OpticalClusterSnapshot(ContactClusterSnapshot):
"""Class for tracking the location of optical clusters at each time step"""
def __init__(self, t, trajectory, ats, molno, atype=u'LS'):
""" Initialize a ClusterSnapshot object.
Parameters
----------
t: timestep
trajectory: gsd.hoomd trajectory or numpy array
numpy array is of size 4 + 3 * ats * molno
(ats is different for optical and aligned clusters)
ats: the number of aromatics in a single molecule
molno: the number of molecules in the system
atype: hoomd bead type
should be the type referring to the aromatic beads
Raises
------
RuntimeError
            if the number of particles does not divide evenly into molecules
Notes
-----
You can create a ClusterSnapshot object from either an array (for use
with MPI) or from a HOOMD trajectory
An optical cluster snapshot tracks the positions of the COMs of the
optical clusters, rather than the positions of the separate beads,
as the contact cluster does
"""
self.timestep = t
self.ats = ats
self.box = None
self.rng = None
if type(trajectory) is np.ndarray:
carray = trajectory
self.timestep = int(carray[0])
self.ats = int(carray[2])
self.nclusts = carray[1]
pend = 4 + 3 * ats * molno
self.pos = np.reshape(carray[4:pend],[molno,3*ats])
self.clusterIDs = carray[pend:len(carray)]
else:
if t != -1:
snapshot = trajectory[t]
#self.pos = self.getComs(compairs,atype,trajectory[t],molno)
tind = snapshot.particles.types.index(atype)
types = snapshot.particles.typeid
self.box = snapshot.configuration.box
self.pos = \
snapshot.particles.position[np.where(types==tind)[0]]
sz = np.shape(self.pos)
if sz[0] % ats != 0:
                    raise RuntimeError("Number of particles not divisible by \
                                        number of beads per molecule.")
                self.pos = np.reshape(self.pos, [sz[0] // ats, 3 * ats])
else:#create a dummy object to help with mpi scattering
snapshot = trajectory[0]
#self.pos = self.getComs(compairs,atype,snapshot,molno)
tind = snapshot.particles.types.index(atype)
types = snapshot.particles.typeid
self.pos = \
snapshot.particles.position[np.where(types==tind)[0]]
sz = np.shape(self.pos)
                self.pos = np.reshape(self.pos, [sz[0] // ats, 3 * ats])
self.pos = float('NaN') * self.pos
self.nclusts = molno
self.clusterIDs = range(int(sz[0] / ats))
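# The ndarray branch of these constructors exists so a snapshot can be
# flattened with toArray, scattered (e.g. with mpi4py), and rebuilt on the
# other side. A hedged sketch of that round trip; the timestep argument is
# effectively ignored when an array is passed, since it is read back out of
# the array itself.
def _example_array_round_trip(snap, ats, molno):
    carray = snap.toArray()
    assert len(carray) == snap.getCArrayLen()
    rebuilt = ContactClusterSnapshot(0, carray, ats, molno)
    return rebuilt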
class OpticalClusterHeteroSnapshot(ContactClusterHeteroSnapshot):
"""Class for tracking the location of optical clusters at each time step
for a heterogeneous system
"""
def __init__(self, t, trajectory, ats, molno, typelist, atype=u'LS'):
""" Initialize a ClusterSnapshot object.
Parameters
----------
t: timestep
trajectory: gsd.hoomd trajectory or numpy array
numpy array is of size 4 + 3 * ats * molno
(ats is different for optical and aligned clusters)
ats: the number of aromatics in a single molecule
molno: the number of molecules in the system
typelist: list
list of different molecule types should be u'EA', u'EB' or similar
atype: hoomd bead type
should be the type referring to the aromatic beads
Raises
------
RuntimeError
            if the number of particles does not divide evenly into molecules
Notes
-----
You can create a ClusterSnapshot object from either an array (for use
with MPI) or from a HOOMD trajectory
An optical cluster snapshot tracks the positions of the COMs of the
optical clusters, rather than the positions of the separate beads,
as the contact cluster does
"""
self.timestep = t
self.ats = ats
snapshot = trajectory[t]
self.box = snapshot.configuration.box
#self.pos = self.getComs(compairs,atype,trajectory[t],molno)
tind = snapshot.particles.types.index(atype)
types = snapshot.particles.typeid
self.pos = \
snapshot.particles.position[np.where(types==tind)[0]]
sz = np.shape(self.pos)
if sz[0] % ats != 0:
            raise RuntimeError("Number of particles not divisible by \
                                number of beads per molecule.")
        self.pos = np.reshape(self.pos, [sz[0] // ats, 3 * ats])
        self.nclusts = molno
        self.clusterIDs = -1 * np.ones(int(sz[0] / ats)).astype(int)
        m = sz[0] // ats
        self.typeIDs = np.zeros(m).astype(int)
        for tidx in range(len(typelist)):
            ti = snapshot.particles.types.index(typelist[tidx])
            tloc = np.argwhere(snapshot.particles.typeid == ti)
            self.typeIDs[snapshot.particles.body[tloc]] = ti
self.rng = None
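# nnangSpread (defined on ContactClusterSnapshot above) needs both the
# adjacency matrix and the cluster IDs, which setClusterID provides. A
# short sketch; the aromatic bead indices here are purely illustrative.
def _example_nn_angle_spread(snap, cutoff2):
    snap.setClusterID(cutoff2)      # sets snap.rng and snap.clusterIDs
    ainds = [1, 2, 3, 4, 5, 6]      # hypothetical aromatic bead indices
    # one row per cluster: size, mean bonded angle, stddev of that angle
    return snap.nnangSpread(ainds)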
class AlignedClusterSnapshot(OpticalClusterSnapshot):
"""Class for tracking the location of aligned clusters at each time step"""
def getComsGeneral(self,compairs,atype,snapshot,molno):
"""Helper function to get the COMs of a subset of beads
Parameters
----------
compairs: m x n numpy array
these are the comparative indices of the beads making up each
aromatic group, where m is the number of aromatics and n is the
number of beads in the group, eg for two beads representing a
ring in the 3-core model, this should be
[[0,6],[1,7],[2,8],[3,9],[4,10],[5,11]]
atype: hoomd bead type
should be the type referring to the aromatic beads
snapshot: gsd snapshot at the particular time of interest
molno: int
number of molecules in snapshot
Returns
-------
aCOMS: nPairs x 3 numpy array
array of COM positions for each bead
Raises
------
RuntimeError
if the number of beads in the aromatics isn't equal to the
total number of aromatics * beads in an aromatic
Notes
-----
This is the more general way of finding COM and can be used in the
future but currently should not be called.
"""
tind = snapshot.particles.types.index(atype)
types = snapshot.particles.typeid
ats = self.ats
aBeads = snapshot.particles.position[np.where(types==tind)[0]]
pairShape = np.shape(compairs)
nPairs = pairShape[0]
aromSize = pairShape[1]
beadNo = np.shape(aBeads)[0]
        if nPairs * aromSize != beadNo // molno:
raise RuntimeError("number of beads ({0} in {1} molecules)\
does not divide cleanly \
among aromatics ({2}) of size {3}".format(beadNo,molno,nPairs,
aromSize))
aCOMs = np.zeros([nPairs * molno,3])
for moli in range(molno):
            aBeadsMol = aBeads[(moli * beadNo // molno):(moli * beadNo // molno)
                               + beadNo // molno, :]
for m in range(nPairs):
aCOMs[moli*nPairs + m,:] = np.mean(aBeadsMol[compairs[m]],
axis=0)
return aCOMs
def __init__(self, t, trajectory, ats, molno,
compairs=np.array([[0,6],[1,7],[2,8],[3,9],[4,10],[5,11]]),
atype=u'LS'):
""" Initialize a ClusterSnapshot object.
Parameters
----------
t: timestep
trajectory: gsd.hoomd trajectory or numpy array
numpy array is of size 4 + 3 * ats * molno
(ats is different for optical and aligned clusters)
ats: the number of aromatics in a single molecule
molno: the number of molecules in the system
compairs: m x n numpy array
these are the comparative indices of the beads making up each
aromatic group, where m is the number of aromatics and n is the
number of beads in the group, eg for two beads representing a
ring in the 3-core model, this should be
[[0,6],[1,7],[2,8],[3,9],[4,10],[5,11]]
atype: hoomd bead type
should be the type referring to the aromatic beads
Raises
------
RuntimeError
            if the number of particles does not divide evenly into molecules
Notes
-----
You can create a ClusterSnapshot object from either an array (for use
with MPI) or from a HOOMD trajectory
An aligned cluster snapshot just uses a different distance metric
from an optical cluster snapshot
"""
self.timestep = t
self.ats = ats
self.box = None
if type(trajectory) is np.ndarray:
carray = trajectory
self.timestep = int(carray[0])
self.ats = int(carray[2])
self.nclusts = carray[1]
pend = 4 + 3 * ats * molno
self.pos = np.reshape(carray[4:pend],[molno,3*ats])
self.clusterIDs = carray[pend:len(carray)]
self.box = None
else:
if t != -1:
snapshot = trajectory[t]
self.box = snapshot.configuration.box
self.pos = self.getComs(compairs,atype,trajectory[t],molno)
sz = np.shape(self.pos)
if sz[0] % ats != 0:
                    raise RuntimeError("Number of particles not divisible by \
                                        number of beads per molecule.")
                self.pos = np.reshape(self.pos, [sz[0] // ats, 3 * ats])
else:#create a dummy object to help with mpi scattering
snapshot = trajectory[0]
self.box = snapshot.configuration.box
self.pos = self.getComs(compairs,atype,snapshot,molno)
sz = np.shape(self.pos)
                self.pos = np.reshape(self.pos, [sz[0] // ats, 3 * ats])
self.pos = float('NaN') * self.pos
self.nclusts = molno
self.clusterIDs = range(int(sz[0] / ats))
def getComs(self,compairs,atype,snapshot,molno,missingID=None):
"""Helper function to get the COMs of a subset of beads
Parameters
----------
compairs: m x n numpy array
these are the comparative indices of the beads making up each
aromatic group, where m is the number of aromatics and n is the
number of beads in the group, eg for two beads representing a
ring in the 3-core model, this should be
[[0,6],[1,7],[2,8],[3,9],[4,10],[5,11]]
atype: hoomd bead type
should be the type referring to the aromatic beads
snapshot: gsd snapshot at the particular time of interest
molno: int
number of molecules in snapshot
Returns
-------
aCOMS: nPairs x 3 numpy array
array of COM positions for each bead
Raises
------
RuntimeError
if the number of beads in the aromatics isn't equal to the
total number of aromatics * beads in an aromatic
RuntimeError
            if the pairs aren't actually pairs (i.e. more than two beads per
            group); that case requires the more general getComsGeneral method
NotImplementedError
if box isn't set
Notes
-----
For this type of cluster, we check the vector pointing between the
first bead pair and assume that the COM is located at bead1 + 1/2(vec)
for all three COMs
This will *only* work for COM pairs of beads and you need to know
which way rvec should be going! (which depends on which bead is
missing, if any of them are.) If there is a bead missing from the
pairs you MUST check which one it is and pass in whether rvec
should be reversed.
"""
if self.box is None:
raise NotImplementedError("You are running on a cluster created from an array, which does not yet support box type analysis.")
tind = snapshot.particles.types.index(atype)
types = snapshot.particles.typeid
aBeads = snapshot.particles.position[np.where(types==tind)[0]]
pairShape = np.shape(compairs)
nPairs = pairShape[0]
aromSize = pairShape[1]
if pairShape[1] != 2:
raise RuntimeError("Not pairs. Call the general getCOM function")
beadNo = np.shape(aBeads)[0]
        if nPairs * aromSize != beadNo // molno:
raise RuntimeError("number of beads ({0} in {1} molecules)\
does not divide cleanly \
among aromatics ({2}) of size {3}".format(beadNo,molno,nPairs,
aromSize))
aCOMs = np.zeros([nPairs * molno,3])
for moli in range(molno):
            aBeadsMol = aBeads[(moli * beadNo // molno):(moli * beadNo // molno)
                               + beadNo // molno, :]
#pdb.set_trace()
rvec = (aBeadsMol[compairs[0][1]] - aBeadsMol[compairs[0][0]])/2
rvec = rvec - np.around(rvec/self.box[0:3])*self.box[0:3]
for m in range(nPairs):
if compairs[m][1] == missingID:
rvec = (aBeadsMol[compairs[0][1]] \
- aBeadsMol[compairs[0][0]])/2
rvec = rvec - np.around(rvec/self.box[0:3])*self.box[0:3]
comloc = aBeadsMol[compairs[m][0]]+rvec
#pdb.set_trace()
elif compairs[m][0] == missingID:
rvec = (aBeadsMol[compairs[0][0]] \
- aBeadsMol[compairs[0][1]])/2
rvec = rvec - np.around(rvec/self.box[0:3])*self.box[0:3]
comloc = aBeadsMol[compairs[m][0]]+rvec
#pdb.set_trace()
else:
cv = aBeadsMol[compairs[m][0]] - aBeadsMol[compairs[m][1]]
cv = cv - np.around(cv/self.box[0:3])*self.box[0:3]
comloc = aBeadsMol[compairs[m][1]]+cv/2
#comloc = np.mean(aBeadsMol[compairs[m]],axis=0)
#pdb.set_trace()
aCOMs[moli*nPairs + m,:] = comloc
return aCOMs
def writeCOMsGSD(self,gsdname):
""" Write out a GSD file of this snapshot that shows the locations of
the aligned COMs after initialization
Parameters
----------
gsdname: string
what name to save the file to
"""
try:
gsdf = gsd.hoomd.open(gsdname,'ab')
except IOError:
gsdf = gsd.hoomd.open(gsdname,'wb')
sz = np.shape(self.pos)
molno = sz[0]
pos = np.reshape(self.pos,[sz[0]*self.ats,3])
#pdb.set_trace()
pN = sz[0]*self.ats
ptypes = ['A']
ptypeid = np.zeros(molno*self.ats).astype(int)
pbox = self.box
s = gsd.hoomd.Snapshot()
s.particles.N = pN
s.configuration.step = self.timestep
s.particles.types = ptypes
s.particles.typeid = ptypeid
s.configuration.box = pbox
s.particles.position = pos
gsdf.append(s)
def setClusterID(self,cutoff):
"""
Set the cluster IDs using getClusterID
Parameters
----------
cutoff: the squared distance molecules have to be within to be
part of the same cluster
Returns
-------
BT: BallTree
for length computation
"""
(nclusts,clusterIDs,BT) = \
self.getClusterID(self.pos,cutoff,alignDistancesCython)
self.nclusts = nclusts
self.clusterIDs = clusterIDs
return BT
def getClusterID(self, positions,cutoff,func):
"""
Find the ID of which cluster each molecule is in
Parameters
----------
        positions: numpy array
            positions of the molecules, one row per molecule
        cutoff: the squared distance molecules have to be within to be
            part of the same cluster
        func: python function
            distance metric passed to the BallTree (e.g. alignDistancesCython)
Returns
-------
clusterIDs: numpy array of the cluster index of the cluster that
each molecule occupies
nclusts: number of clusters
BT: BallTree for possible other computations
"""
BT = BallTree(positions,metric='pyfunc',
func=func)
rng = radius_neighbors_graph(BT,cutoff)
(nclusts,clusterIDs) = connected_components(rng,directed=False,
return_labels=True)
return (nclusts,clusterIDs,BT)
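# A quick way to sanity-check the aligned-COM construction implemented above
# is to dump the computed pair COMs to a gsd file and open it in a viewer.
# A sketch with illustrative file names:
def _example_write_aligned_coms(trajfile, t, ats, molno):
    traj = gsd.hoomd.open(trajfile, 'rb')
    snap = AlignedClusterSnapshot(t, traj, ats, molno)
    snap.writeCOMsGSD('aligned_coms_check.gsd')
    return snap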
class ContactClusterSnapshotXTC(ContactClusterSnapshot):
""" Class for tracking contact cluster locations that are initialized
from an xtc/Gromacs file instead of a HOOMD one
Attributes
----------
timestep: float
timestep
ats: int
number of beads per molecule
nclusts: int
number of clusters in the snapshot
pos: numpy array [M x 3*ats]
locations of molecules and beads within molecules
each molecule is its own line and then the locations of beads
are flattened within that
clusterIDs: list [len M]
"""
def readGro(self,fName):
""" Get a list of positions from a Gromacs .gro file
Parameters
----------
fname: string
name of .gro file
Returns
-------
        (pos, box): tuple
            pos: numpy vector [len 3 * molecules * ats]
                1D list of positions in the .gro file
            box: 1 x 3 numpy array of the box side lengths
"""
with open(fName, 'r') as myF:
myLns = myF.read().splitlines()
boxL1 = float(myLns[len(myLns)-1].split()[0])
boxL2 = float(myLns[len(myLns)-1].split()[1])
boxL3 = float(myLns[len(myLns)-1].split()[2])
return (np.array([[float(myLns[i][20:].split()[0]),
float(myLns[i][20:].split()[1]),
float(myLns[i][20:].split()[2])]\
for i in range(2, len(myLns)-1)]).flatten(),
np.array([boxL1,boxL2,boxL3]))
def __init__(self, t, trj, ats, molno):
""" Initialize a ContactClusterSnapshotXTC object.
Parameters
----------
t: timestep
        trj: string
            name of the Gromacs frame to read; this is passed to readGro,
            which expects .gro format
        ats: the number of beads in a single molecule
        molno: the number of molecules in the system
Raises
------
RuntimeError
            if the number of particles does not divide evenly into molecules
Notes
-----
You can create a ClusterSnapshot object from either an array (for use
with MPI) or from a HOOMD trajectory
"""
self.timestep = t
self.ats = ats
self.nclusts = molno
self.clusterIDs = np.zeros(molno)
(self.pos,self.box) = self.readGro(trj)
self.rng = None
if len(self.pos) != 3 * molno * ats:
raise RuntimeError("incorrect number of atoms or molecules")
#pdb.set_trace()
self.pos = np.reshape(self.pos,[molno,3*ats])
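# The XTC-flavored snapshots read a single Gromacs .gro frame through
# readGro. A minimal sketch (the .gro filename is a placeholder):
def _example_gro_contact_clusters(grofile, t, ats, molno, cutoff2):
    snap = ContactClusterSnapshotXTC(t, grofile, ats, molno)
    snap.setClusterID(cutoff2)
    return snap.idsToSizes()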
class OpticalClusterSnapshotXTC(ContactClusterSnapshotXTC):
""" Class for tracking optical cluster locations that are initialized
from an xtc/Gromacs file instead of a HOOMD one
Attributes
----------
timestep: float
timestep
ats: int
number of beads per molecule
nclusts: int
number of clusters in the snapshot
pos: numpy array [M x 3*ats]
locations of molecules and beads within molecules
each molecule is its own line and then the locations of beads
are flattened within that
clusterIDs: list [len M]
"""
def __init__(self, t, trj, ats, molno, comIDs):
""" Initialize a ContactClusterSnapshotXTC object.
Parameters
----------
t: timestep
        trj: string
            name of the Gromacs frame to read; this is passed to readGro,
            which expects .gro format
        ats: the number of beads in a single molecule
        molno: the number of molecules in the system
comIDs: N x M numpy array of ints
bead IDs of the beads in the N cores with M participating beads
each
Raises
------
RuntimeError
            if the number of particles does not divide evenly into molecules
Notes
-----
* Need to calculate COMs with PBCs accounted for or you get artifacts
because we're specifically breaking these things over the box in order
to calculate this correctly
* We are assuming that the masses of all beads in the COMs are the same
"""
self.timestep = t
self.ats = ats
self.nclusts = molno
self.clusterIDs = np.zeros(molno)
(self.pos,boxL) = self.readGro(trj)
self.box = boxL
self.rng = None
if len(self.pos) != 3 * molno * ats:
raise RuntimeError("incorrect number of atoms or molecules")
#pdb.set_trace()
self.pos = np.reshape(self.pos,[molno,3*ats])
M = np.shape(comIDs)[1]
pos = np.zeros((molno,3*np.shape(comIDs)[0]))
for mol in range(molno):
for com in range(np.shape(comIDs)[0]):
inds = comIDs[com,:]
com0 = self.pos[mol,(3*inds[0]):(3*inds[0]+3)]
compos = np.zeros((1,3))
for cbeadi in range(1,M):
comcurr = self.pos[mol,
(3*inds[cbeadi]):(3*inds[cbeadi]+3)]
dcomcurr = comcurr-com0
for xi in range(3):
dcomcurr[xi] = dcomcurr[xi] - \
boxL[xi]*np.round(dcomcurr[xi]/boxL[xi])
compos += dcomcurr
compos = compos / M
compos = compos + com0
pos[mol,3*com:(3*com+3)] = compos
self.pos = pos
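# The loop above averages the core beads with the minimum-image convention
# so that COMs are not split across the periodic box. The same idea as a
# small standalone helper (a sketch; it assumes equal bead masses, as the
# notes above do):
def _min_image_com(beads, boxL):
    """COM of an M x 3 array of bead positions in a periodic box boxL (len 3)."""
    ref = beads[0]
    disp = beads - ref
    disp -= boxL * np.round(disp / boxL)    # wrap displacements to [-L/2, L/2)
    return ref + disp.mean(axis=0)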
| {
"repo_name": "ramansbach/cluster_analysis",
"path": "clustering/clustering.py",
"copies": "1",
"size": "98464",
"license": "mit",
"hash": 4053610474325787000,
"line_mean": 34.5209235209,
"line_max": 138,
"alpha_frac": 0.5095872603,
"autogenerated": false,
"ratio": 4.199249402934152,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.02079719868267004,
"num_lines": 2772
} |
from __future__ import absolute_import, division, print_function
import numpy as np
import gzip
from itertools import zip_longest
def _ascii_to_phred(s, offset):
"""Convert ascii to Phred quality score with specified ASCII offset."""
return np.fromstring(s, dtype='|S1').view(np.int8) - offset
def ascii_to_phred33(s):
"""Convert ascii string to Phred quality score with ASCII offset of 33.
Standard "Sanger" ASCII offset of 33. This is used by Illumina in CASAVA
versions after 1.8.0, and most other places. Note that internal Illumina
files still use offset of 64
"""
return _ascii_to_phred(s, 33)
def ascii_to_phred64(s):
"""Convert ascii string to Phred quality score with ASCII offset of 64.
Illumina-specific ASCII offset of 64. This is used by Illumina in CASAVA
versions prior to 1.8.0, and in Illumina internal formats (e.g.,
export.txt files).
"""
return _ascii_to_phred(s, 64)
def _drop_id_marker(s):
"""Drop the first character and decode bytes to text"""
id_ = s[1:]
try:
return str(id_.decode('utf-8'))
except AttributeError:
return id_
def parse_fastq(data, strict=False, enforce_qual_range=True, phred_offset=33):
r"""yields label, seq, and qual from a fastq file.
.. note:: Deprecated in scikit-bio 0.2.0-dev
``parse_fastq`` will be removed in scikit-bio 0.3.0. It is replaced by
``read``, which is a more general method for deserializing
FASTQ-formatted files. ``read`` supports multiple file formats,
automatic file format detection, etc. by taking advantage of
scikit-bio's I/O registry system. See :mod:`skbio.io` for more details.
Parameters
----------
    data : str or open file object
        Path to a gzip-compressed fastq file (or an open file object
        containing gzip-compressed data); the input is opened with ``gzip``.
    strict : bool, optional
        Defaults to ``False``. If strict is true a ``ValueError`` will be
        raised if the seq and qual labels don't match.
enforce_qual_range : bool, optional
Defaults to ``True``. If ``True``, an exception will be raised if a
quality score outside the range [0, 62] is detected
phred_offset : {33, 64}, optional
What Phred offset to use when converting qual score symbols to integers
Returns
-------
label, seq, qual : (str, bytes, np.array)
yields the label, sequence and quality for each entry
Examples
--------
Assume we have a fastq formatted file with the following contents::
@seq1
AACACCAAACTTCTCCACCACGTGAGCTACAAAAG
+
````Y^T]`]c^cabcacc`^Lb^ccYT\T\Y\WF
@seq2
TATGTATATATAACATATACATATATACATACATA
+
]KZ[PY]_[YY^```ac^\\`bT``c`\aT``bbb
We can use the following code:
>>> from StringIO import StringIO
>>> from skbio.parse.sequences import parse_fastq
>>> fastq_f = StringIO('@seq1\n'
... 'AACACCAAACTTCTCCACCACGTGAGCTACAAAAG\n'
... '+\n'
... '````Y^T]`]c^cabcacc`^Lb^ccYT\T\Y\WF\n'
... '@seq2\n'
... 'TATGTATATATAACATATACATATATACATACATA\n'
... '+\n'
... ']KZ[PY]_[YY^```ac^\\\`bT``c`\\aT``bbb\n')
>>> for label, seq, qual in parse_fastq(fastq_f, phred_offset=64):
... print(label)
... print(seq)
... print(qual)
seq1
AACACCAAACTTCTCCACCACGTGAGCTACAAAAG
[32 32 32 32 25 30 20 29 32 29 35 30 35 33 34 35 33 35 35 32 30 12 34 30 35
35 25 20 28 20 28 25 28 23 6]
seq2
TATGTATATATAACATATACATATATACATACATA
[29 11 26 27 16 25 29 31 27 25 25 30 32 32 32 33 35 30 28 28 32 34 20 32 32
35 32 28 33 20 32 32 34 34 34]
"""
if phred_offset == 33:
phred_f = ascii_to_phred33
elif phred_offset == 64:
phred_f = ascii_to_phred64
else:
raise ValueError("Unknown PHRED offset of %s" % phred_offset)
with gzip.open(data, mode='rt') as fi:
iters = [iter(fi)] * 4
for seqid, seq, qualid, qual in zip_longest(*iters):
seqid = seqid.strip()
            # If the file simply ended in a blank line, do not error
            if seqid == '':
continue
# Error if an incomplete record is found
# Note: seqid cannot be None, because if all 4 values were None,
# then the loop condition would be false, and we could not have
# gotten to this point
if seq is None or qualid is None or qual is None:
raise ValueError("Incomplete FASTQ record found at end "
"of file")
seq = seq.strip()
qualid = qualid.strip()
qual = qual.strip()
seqid = _drop_id_marker(seqid)
try:
seq = str(seq.decode("utf-8"))
except AttributeError:
pass
qualid = _drop_id_marker(qualid)
if strict:
if seqid != qualid:
raise ValueError('ID mismatch: {} != {}'.format(
seqid, qualid))
# bounds based on illumina limits, see:
# http://nar.oxfordjournals.org/content/38/6/1767/T1.expansion.html
qual = phred_f(qual)
if enforce_qual_range and ((qual < 0).any() or (qual > 62).any()):
raise ValueError("Failed qual conversion for seq id: %s. "
"This may be because you passed an "
"incorrect value for phred_offset." %
seqid)
yield (seqid, seq, qual)
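# Minimal usage sketch: this variant of the parser opens its input with
# gzip, so the path below should point at a gzip-compressed FASTQ file
# (the filename is a placeholder):
def _example_mean_quality(path):
    means = [qual.mean() for _, _, qual in parse_fastq(path, phred_offset=33)]
    return np.mean(means) if means else float('nan')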
| {
"repo_name": "eco32i/keds",
"path": "src/parsers.py",
"copies": "1",
"size": "5717",
"license": "mit",
"hash": -5005583466393571000,
"line_mean": 37.3691275168,
"line_max": 79,
"alpha_frac": 0.572677978,
"autogenerated": false,
"ratio": 3.7341606792945785,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48068386572945787,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import numpy as np
import h5py
from multipledispatch import MDNotImplementedError
from datashape import DataShape, to_numpy
from toolz import curry
from ..partition import partitions
from ..expr import Reduction, Field, symbol
from ..expr import Expr, Slice, ElemWise
from ..expr import nelements
from ..expr import path, shape, Symbol
from ..expr.split import split
from .core import compute
from ..dispatch import dispatch
from ..utils import available_memory, thread_pool
__all__ = []
@dispatch(Slice, h5py.Dataset)
def pre_compute(expr, data, scope=None, **kwargs):
""" Don't push slices into memory, they're about to come in anyway """
return data
@dispatch(Expr, h5py.Dataset)
def pre_compute(expr, data, scope=None, **kwargs):
""" Bring dataset into memory if it's small relative to memory """
nbytes = data.size * data.dtype.alignment
comfortable_memory = available_memory() / 4
if nbytes < comfortable_memory:
return data.value
else:
return data
@dispatch(Expr, h5py.Dataset)
def post_compute(expr, data, scope=None):
""" Bring dataset into memory if it's small relative to memory """
nbytes = data.size * data.dtype.alignment
comfortable_memory = available_memory() / 4
if nbytes < comfortable_memory:
return data.value
else:
return data
@dispatch(Symbol, h5py.Dataset)
def optimize(expr, data):
return expr
@dispatch(Expr, h5py.Dataset)
def optimize(expr, data):
child = optimize(expr._inputs[0], data)
if child is expr._inputs[0]:
return expr
else:
return expr._subs({expr._inputs[0]: child})
@dispatch(Slice, (h5py.File, h5py.Group, h5py.Dataset))
def optimize(expr, data):
child = expr._inputs[0]
if (isinstance(child, ElemWise) and len(child._inputs) == 1
and shape(child._inputs[0]) == shape(child)):
grandchild = child._inputs[0][expr.index]
grandchild = optimize(grandchild, data)
return child._subs({child._inputs[0]: grandchild})
if (isinstance(child, ElemWise) and len(child._inputs) == 2
and shape(child) == shape(expr._inputs[0]) == shape(child._inputs[1])):
lhs, rhs = child._inputs
lhs = lhs[expr.index]
rhs = rhs[expr.index]
lhs = optimize(lhs, data)
rhs = optimize(rhs, data)
return child._subs(dict(zip(child._inputs, (lhs, rhs))))
else:
return expr
@dispatch(Symbol, (h5py.File, h5py.Group, h5py.Dataset))
def compute_up(expr, data, **kwargs):
return data
@dispatch(Field, (h5py.File, h5py.Group))
def compute_up(expr, data, **kwargs):
return data[expr._name]
@dispatch(Slice, h5py.Dataset)
def compute_up(expr, data, **kwargs):
return data[expr.index]
@dispatch(nelements, h5py.Dataset)
def compute_up(expr, data, **kwargs):
return compute_up.dispatch(type(expr), np.ndarray)(expr, data, **kwargs)
@dispatch(Expr, (h5py.File, h5py.Group))
def compute_down(expr, data, **kwargs):
leaf = expr._leaves()[0]
p = list(path(expr, leaf))[::-1][1:]
if not p:
return data
for e in p:
data = compute_up(e, data)
if not isinstance(data, (h5py.File, h5py.Group)):
break
expr2 = expr._subs({e: symbol('leaf', e.dshape)})
return compute_down(expr2, data, **kwargs)
def compute_chunk(source, target, chunk, chunk_expr, parts):
""" Pull out a part, compute it, insert it into the target """
source_part, target_part = parts
part = source[source_part]
result = compute(chunk_expr, {chunk: part})
target[target_part] = result
@dispatch(Expr, h5py.Dataset)
def compute_down(expr, data, map=thread_pool.map, **kwargs):
""" Compute expressions on H5Py datasets by operating on chunks
    This uses blaze.expr.split to break a full-array computation into a
    per-chunk computation and an on-aggregate computation.
This uses blaze.partition to pick out chunks from the h5py dataset, uses
compute(numpy) to compute on each chunk and then uses blaze.partition to
aggregate these (hopefully smaller) intermediate results into a local
numpy array. It then performs a second operation (again given by
blaze.expr.split) on this intermediate aggregate
The expression must contain some sort of Reduction. Both the intermediate
result and the final result are assumed to fit into memory
"""
leaf = expr._leaves()[0]
if not any(isinstance(node, Reduction) for node in path(expr, leaf)):
raise MDNotImplementedError()
# Compute chunksize (this should be improved)
chunksize = kwargs.get('chunksize', data.chunks)
# Split expression into per-chunk and on-aggregate pieces
chunk = symbol('chunk', DataShape(*(chunksize + (leaf.dshape.measure,))))
(chunk, chunk_expr), (agg, agg_expr) = \
split(leaf, expr, chunk=chunk)
# Create numpy array to hold intermediate aggregate
shape, dtype = to_numpy(agg.dshape)
intermediate = np.empty(shape=shape, dtype=dtype)
# Compute partitions
source_parts = list(partitions(data, chunksize=chunksize, keepdims=True))
target_parts = list(partitions(intermediate, chunksize=chunk_expr.shape,
keepdims=True))
parts = list(map(curry(compute_chunk, data, intermediate, chunk, chunk_expr),
zip(source_parts, target_parts)))
# Compute on the aggregate
return compute(agg_expr, {agg: intermediate})
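# The chunked strategy above (compute per chunk, then aggregate) can be
# illustrated without blaze's split machinery. A hedged sketch of the same
# pattern for a plain sum over a one-dimensional h5py dataset, using only
# h5py slicing and numpy (the chunk size is an arbitrary choice):
def _chunked_sum(dataset, chunksize=100000):
    total = 0.0
    n = dataset.shape[0]
    for start in range(0, n, chunksize):
        block = dataset[start:start + chunksize]   # pull one chunk into memory
        total += block.sum()                       # fold it into the aggregate
    return total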
| {
"repo_name": "dwillmer/blaze",
"path": "blaze/compute/h5py.py",
"copies": "12",
"size": "5533",
"license": "bsd-3-clause",
"hash": 6659819721293256000,
"line_mean": 31.9345238095,
"line_max": 81,
"alpha_frac": 0.672691126,
"autogenerated": false,
"ratio": 3.6281967213114754,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |