from __future__ import absolute_import, division, print_function
import functools
import sys
import traceback
from tornado.concurrent import Future
from tornado import gen
from tornado.httpclient import HTTPError, HTTPRequest
from tornado.log import gen_log, app_log
from tornado.simple_httpclient import SimpleAsyncHTTPClient
from tornado.template import DictLoader
from tornado.testing import AsyncHTTPTestCase, gen_test, bind_unused_port, ExpectLog
from tornado.test.util import unittest, skipBefore35, exec_test
from tornado.web import Application, RequestHandler
try:
import tornado.websocket # noqa
from tornado.util import _websocket_mask_python
except ImportError:
# The unittest module presents misleading errors on ImportError
# (it acts as if websocket_test could not be found, hiding the underlying
# error). If we get an ImportError here (which could happen due to
# TORNADO_EXTENSION=1), print some extra information before failing.
traceback.print_exc()
raise
from tornado.websocket import WebSocketHandler, websocket_connect, WebSocketError, WebSocketClosedError
try:
from tornado import speedups
except ImportError:
speedups = None
class TestWebSocketHandler(WebSocketHandler):
"""Base class for testing handlers that exposes the on_close event.
This allows for deterministic cleanup of the associated socket.
"""
def initialize(self, close_future, compression_options=None):
self.close_future = close_future
self.compression_options = compression_options
def get_compression_options(self):
return self.compression_options
def on_close(self):
self.close_future.set_result((self.close_code, self.close_reason))
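# Tests wait on ``close_future`` to synchronize with the server-side close,
# e.g. ``code, reason = yield self.close_future`` (see the tests below).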
class EchoHandler(TestWebSocketHandler):
@gen.coroutine
def on_message(self, message):
try:
yield self.write_message(message, isinstance(message, bytes))
except WebSocketClosedError:
pass
class ErrorInOnMessageHandler(TestWebSocketHandler):
def on_message(self, message):
1 / 0
class HeaderHandler(TestWebSocketHandler):
def open(self):
methods_to_test = [
functools.partial(self.write, 'This should not work'),
functools.partial(self.redirect, 'http://localhost/elsewhere'),
functools.partial(self.set_header, 'X-Test', ''),
functools.partial(self.set_cookie, 'Chocolate', 'Chip'),
functools.partial(self.set_status, 503),
self.flush,
self.finish,
]
for method in methods_to_test:
try:
# In a websocket context, many RequestHandler methods
# raise RuntimeErrors.
method()
raise Exception("did not get expected exception")
except RuntimeError:
pass
self.write_message(self.request.headers.get('X-Test', ''))
class HeaderEchoHandler(TestWebSocketHandler):
def set_default_headers(self):
self.set_header("X-Extra-Response-Header", "Extra-Response-Value")
def prepare(self):
for k, v in self.request.headers.get_all():
if k.lower().startswith('x-test'):
self.set_header(k, v)
class NonWebSocketHandler(RequestHandler):
def get(self):
self.write('ok')
class CloseReasonHandler(TestWebSocketHandler):
def open(self):
self.on_close_called = False
self.close(1001, "goodbye")
class AsyncPrepareHandler(TestWebSocketHandler):
@gen.coroutine
def prepare(self):
yield gen.moment
def on_message(self, message):
self.write_message(message)
class PathArgsHandler(TestWebSocketHandler):
def open(self, arg):
self.write_message(arg)
class CoroutineOnMessageHandler(TestWebSocketHandler):
def initialize(self, close_future, compression_options=None):
super(CoroutineOnMessageHandler, self).initialize(close_future,
compression_options)
self.sleeping = 0
@gen.coroutine
def on_message(self, message):
if self.sleeping > 0:
self.write_message('another coroutine is already sleeping')
self.sleeping += 1
yield gen.sleep(0.01)
self.sleeping -= 1
self.write_message(message)
class RenderMessageHandler(TestWebSocketHandler):
def on_message(self, message):
self.write_message(self.render_string('message.html', message=message))
class WebSocketBaseTestCase(AsyncHTTPTestCase):
@gen.coroutine
def ws_connect(self, path, **kwargs):
ws = yield websocket_connect(
'ws://127.0.0.1:%d%s' % (self.get_http_port(), path),
**kwargs)
raise gen.Return(ws)
@gen.coroutine
def close(self, ws):
"""Close a websocket connection and wait for the server side.
If we don't wait here, there are sometimes leak warnings in the
tests.
"""
ws.close()
yield self.close_future
class WebSocketTest(WebSocketBaseTestCase):
def get_app(self):
self.close_future = Future()
return Application([
('/echo', EchoHandler, dict(close_future=self.close_future)),
('/non_ws', NonWebSocketHandler),
('/header', HeaderHandler, dict(close_future=self.close_future)),
('/header_echo', HeaderEchoHandler,
dict(close_future=self.close_future)),
('/close_reason', CloseReasonHandler,
dict(close_future=self.close_future)),
('/error_in_on_message', ErrorInOnMessageHandler,
dict(close_future=self.close_future)),
('/async_prepare', AsyncPrepareHandler,
dict(close_future=self.close_future)),
('/path_args/(.*)', PathArgsHandler,
dict(close_future=self.close_future)),
('/coroutine', CoroutineOnMessageHandler,
dict(close_future=self.close_future)),
('/render', RenderMessageHandler,
dict(close_future=self.close_future)),
], template_loader=DictLoader({
'message.html': '<b>{{ message }}</b>',
}))
def get_http_client(self):
# These tests require HTTP/1; force the use of SimpleAsyncHTTPClient.
return SimpleAsyncHTTPClient()
def tearDown(self):
super(WebSocketTest, self).tearDown()
RequestHandler._template_loaders.clear()
def test_http_request(self):
# WS server, HTTP client.
response = self.fetch('/echo')
self.assertEqual(response.code, 400)
def test_missing_websocket_key(self):
response = self.fetch('/echo',
headers={'Connection': 'Upgrade',
'Upgrade': 'WebSocket',
'Sec-WebSocket-Version': '13'})
self.assertEqual(response.code, 400)
def test_bad_websocket_version(self):
response = self.fetch('/echo',
headers={'Connection': 'Upgrade',
'Upgrade': 'WebSocket',
'Sec-WebSocket-Version': '12'})
self.assertEqual(response.code, 426)
@gen_test
def test_websocket_gen(self):
ws = yield self.ws_connect('/echo')
yield ws.write_message('hello')
response = yield ws.read_message()
self.assertEqual(response, 'hello')
yield self.close(ws)
def test_websocket_callbacks(self):
websocket_connect(
'ws://127.0.0.1:%d/echo' % self.get_http_port(),
callback=self.stop)
ws = self.wait().result()
ws.write_message('hello')
ws.read_message(self.stop)
response = self.wait().result()
self.assertEqual(response, 'hello')
self.close_future.add_done_callback(lambda f: self.stop())
ws.close()
self.wait()
@gen_test
def test_binary_message(self):
ws = yield self.ws_connect('/echo')
ws.write_message(b'hello \xe9', binary=True)
response = yield ws.read_message()
self.assertEqual(response, b'hello \xe9')
yield self.close(ws)
@gen_test
def test_unicode_message(self):
ws = yield self.ws_connect('/echo')
ws.write_message(u'hello \u00e9')
response = yield ws.read_message()
self.assertEqual(response, u'hello \u00e9')
yield self.close(ws)
@gen_test
def test_render_message(self):
ws = yield self.ws_connect('/render')
ws.write_message('hello')
response = yield ws.read_message()
self.assertEqual(response, '<b>hello</b>')
yield self.close(ws)
@gen_test
def test_error_in_on_message(self):
ws = yield self.ws_connect('/error_in_on_message')
ws.write_message('hello')
with ExpectLog(app_log, "Uncaught exception"):
response = yield ws.read_message()
self.assertIs(response, None)
yield self.close(ws)
@gen_test
def test_websocket_http_fail(self):
with self.assertRaises(HTTPError) as cm:
yield self.ws_connect('/notfound')
self.assertEqual(cm.exception.code, 404)
@gen_test
def test_websocket_http_success(self):
with self.assertRaises(WebSocketError):
yield self.ws_connect('/non_ws')
@gen_test
def test_websocket_network_fail(self):
sock, port = bind_unused_port()
sock.close()
with self.assertRaises(IOError):
with ExpectLog(gen_log, ".*"):
yield websocket_connect(
'ws://127.0.0.1:%d/' % port,
connect_timeout=3600)
@gen_test
def test_websocket_close_buffered_data(self):
ws = yield websocket_connect(
'ws://127.0.0.1:%d/echo' % self.get_http_port())
ws.write_message('hello')
ws.write_message('world')
# Close the underlying stream.
ws.stream.close()
yield self.close_future
@gen_test
def test_websocket_headers(self):
# Ensure that arbitrary headers can be passed through websocket_connect.
ws = yield websocket_connect(
HTTPRequest('ws://127.0.0.1:%d/header' % self.get_http_port(),
headers={'X-Test': 'hello'}))
response = yield ws.read_message()
self.assertEqual(response, 'hello')
yield self.close(ws)
@gen_test
def test_websocket_header_echo(self):
# Ensure that headers can be returned in the response.
# Specifically, that arbitrary headers passed through websocket_connect
# can be returned.
ws = yield websocket_connect(
HTTPRequest('ws://127.0.0.1:%d/header_echo' % self.get_http_port(),
headers={'X-Test-Hello': 'hello'}))
self.assertEqual(ws.headers.get('X-Test-Hello'), 'hello')
self.assertEqual(ws.headers.get('X-Extra-Response-Header'), 'Extra-Response-Value')
yield self.close(ws)
@gen_test
def test_server_close_reason(self):
ws = yield self.ws_connect('/close_reason')
msg = yield ws.read_message()
# A message of None means the other side closed the connection.
self.assertIs(msg, None)
self.assertEqual(ws.close_code, 1001)
self.assertEqual(ws.close_reason, "goodbye")
# The on_close callback is called no matter which side closed.
code, reason = yield self.close_future
# The client echoed the close code it received to the server,
# so the server's close code (returned via close_future) is
# the same.
self.assertEqual(code, 1001)
@gen_test
def test_client_close_reason(self):
ws = yield self.ws_connect('/echo')
ws.close(1001, 'goodbye')
code, reason = yield self.close_future
self.assertEqual(code, 1001)
self.assertEqual(reason, 'goodbye')
@gen_test
def test_write_after_close(self):
ws = yield self.ws_connect('/close_reason')
msg = yield ws.read_message()
self.assertIs(msg, None)
with self.assertRaises(WebSocketClosedError):
ws.write_message('hello')
@gen_test
def test_async_prepare(self):
# Previously, an async prepare method triggered a bug that would
# result in a timeout on test shutdown (and a memory leak).
ws = yield self.ws_connect('/async_prepare')
ws.write_message('hello')
res = yield ws.read_message()
self.assertEqual(res, 'hello')
@gen_test
def test_path_args(self):
ws = yield self.ws_connect('/path_args/hello')
res = yield ws.read_message()
self.assertEqual(res, 'hello')
@gen_test
def test_coroutine(self):
ws = yield self.ws_connect('/coroutine')
# Send both messages immediately; the coroutine must process them one at a time.
yield ws.write_message('hello1')
yield ws.write_message('hello2')
res = yield ws.read_message()
self.assertEqual(res, 'hello1')
res = yield ws.read_message()
self.assertEqual(res, 'hello2')
@gen_test
def test_check_origin_valid_no_path(self):
port = self.get_http_port()
url = 'ws://127.0.0.1:%d/echo' % port
headers = {'Origin': 'http://127.0.0.1:%d' % port}
ws = yield websocket_connect(HTTPRequest(url, headers=headers))
ws.write_message('hello')
response = yield ws.read_message()
self.assertEqual(response, 'hello')
yield self.close(ws)
@gen_test
def test_check_origin_valid_with_path(self):
port = self.get_http_port()
url = 'ws://127.0.0.1:%d/echo' % port
headers = {'Origin': 'http://127.0.0.1:%d/something' % port}
ws = yield websocket_connect(HTTPRequest(url, headers=headers))
ws.write_message('hello')
response = yield ws.read_message()
self.assertEqual(response, 'hello')
yield self.close(ws)
@gen_test
def test_check_origin_invalid_partial_url(self):
port = self.get_http_port()
url = 'ws://127.0.0.1:%d/echo' % port
headers = {'Origin': '127.0.0.1:%d' % port}
with self.assertRaises(HTTPError) as cm:
yield websocket_connect(HTTPRequest(url, headers=headers))
self.assertEqual(cm.exception.code, 403)
@gen_test
def test_check_origin_invalid(self):
port = self.get_http_port()
url = 'ws://127.0.0.1:%d/echo' % port
# Host is 127.0.0.1, which should not be accessible from some other
# domain
headers = {'Origin': 'http://somewhereelse.com'}
with self.assertRaises(HTTPError) as cm:
yield websocket_connect(HTTPRequest(url, headers=headers))
self.assertEqual(cm.exception.code, 403)
@gen_test
def test_check_origin_invalid_subdomains(self):
port = self.get_http_port()
url = 'ws://localhost:%d/echo' % port
# Subdomains should be disallowed by default. If we could pass a
# resolver to websocket_connect we could test sibling domains as well.
headers = {'Origin': 'http://subtenant.localhost'}
with self.assertRaises(HTTPError) as cm:
yield websocket_connect(HTTPRequest(url, headers=headers))
self.assertEqual(cm.exception.code, 403)
if sys.version_info >= (3, 5):
NativeCoroutineOnMessageHandler = exec_test(globals(), locals(), """
class NativeCoroutineOnMessageHandler(TestWebSocketHandler):
def initialize(self, close_future, compression_options=None):
super().initialize(close_future, compression_options)
self.sleeping = 0
async def on_message(self, message):
if self.sleeping > 0:
self.write_message('another coroutine is already sleeping')
self.sleeping += 1
await gen.sleep(0.01)
self.sleeping -= 1
self.write_message(message)""")['NativeCoroutineOnMessageHandler']
class WebSocketNativeCoroutineTest(WebSocketBaseTestCase):
def get_app(self):
self.close_future = Future()
return Application([
('/native', NativeCoroutineOnMessageHandler,
dict(close_future=self.close_future))])
@skipBefore35
@gen_test
def test_native_coroutine(self):
ws = yield self.ws_connect('/native')
# Send both messages immediately; the coroutine must process them one at a time.
yield ws.write_message('hello1')
yield ws.write_message('hello2')
res = yield ws.read_message()
self.assertEqual(res, 'hello1')
res = yield ws.read_message()
self.assertEqual(res, 'hello2')
class CompressionTestMixin(object):
MESSAGE = 'Hello world. Testing 123 123'
def get_app(self):
self.close_future = Future()
return Application([
('/echo', EchoHandler, dict(
close_future=self.close_future,
compression_options=self.get_server_compression_options())),
])
def get_server_compression_options(self):
return None
def get_client_compression_options(self):
return None
@gen_test
def test_message_sizes(self):
ws = yield self.ws_connect(
'/echo',
compression_options=self.get_client_compression_options())
# Send the same message three times so we can measure the
# effect of the context_takeover options.
for i in range(3):
ws.write_message(self.MESSAGE)
response = yield ws.read_message()
self.assertEqual(response, self.MESSAGE)
self.assertEqual(ws.protocol._message_bytes_out, len(self.MESSAGE) * 3)
self.assertEqual(ws.protocol._message_bytes_in, len(self.MESSAGE) * 3)
self.verify_wire_bytes(ws.protocol._wire_bytes_in,
ws.protocol._wire_bytes_out)
yield self.close(ws)
class UncompressedTestMixin(CompressionTestMixin):
"""Specialization of CompressionTestMixin when we expect no compression."""
def verify_wire_bytes(self, bytes_in, bytes_out):
# Bytes out includes the 4-byte mask key per message.
self.assertEqual(bytes_out, 3 * (len(self.MESSAGE) + 6))
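# Bytes in carries only the 2-byte frame header per message; frames sent
# by the server are not masked.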
self.assertEqual(bytes_in, 3 * (len(self.MESSAGE) + 2))
class NoCompressionTest(UncompressedTestMixin, WebSocketBaseTestCase):
pass
# If only one side tries to compress, the extension is not negotiated.
class ServerOnlyCompressionTest(UncompressedTestMixin, WebSocketBaseTestCase):
def get_server_compression_options(self):
return {}
class ClientOnlyCompressionTest(UncompressedTestMixin, WebSocketBaseTestCase):
def get_client_compression_options(self):
return {}
class DefaultCompressionTest(CompressionTestMixin, WebSocketBaseTestCase):
def get_server_compression_options(self):
return {}
def get_client_compression_options(self):
return {}
def verify_wire_bytes(self, bytes_in, bytes_out):
self.assertLess(bytes_out, 3 * (len(self.MESSAGE) + 6))
self.assertLess(bytes_in, 3 * (len(self.MESSAGE) + 2))
# Bytes out includes the 4-byte mask key per message.
self.assertEqual(bytes_out, bytes_in + 12)
class MaskFunctionMixin(object):
# Subclasses should define self.mask(mask, data)
def test_mask(self):
self.assertEqual(self.mask(b'abcd', b''), b'')
self.assertEqual(self.mask(b'abcd', b'b'), b'\x03')
self.assertEqual(self.mask(b'abcd', b'54321'), b'TVPVP')
self.assertEqual(self.mask(b'ZXCV', b'98765432'), b'c`t`olpd')
# Include test cases with \x00 bytes (to ensure that the C
# extension isn't depending on null-terminated strings) and
# bytes with the high bit set (to smoke out signedness issues).
self.assertEqual(self.mask(b'\x00\x01\x02\x03',
b'\xff\xfb\xfd\xfc\xfe\xfa'),
b'\xff\xfa\xff\xff\xfe\xfb')
self.assertEqual(self.mask(b'\xff\xfb\xfd\xfc',
b'\x00\x01\x02\x03\x04\x05'),
b'\xff\xfa\xff\xff\xfb\xfe')
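# The masking exercised by these test vectors is the client-to-server payload
# masking from RFC 6455: each payload byte is XORed with the repeating 4-byte
# mask key. A minimal reference sketch (illustrative only; ``_reference_mask``
# is not part of Tornado and is not used by the tests):
def _reference_mask(mask, data):
    # XOR each payload byte with mask[i % 4]; bytearray keeps this working on
    # both Python 2 and Python 3, including \x00 and high-bit bytes.
    mask = bytearray(mask)
    return bytes(bytearray(b ^ mask[i % 4]
                           for i, b in enumerate(bytearray(data))))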
class PythonMaskFunctionTest(MaskFunctionMixin, unittest.TestCase):
def mask(self, mask, data):
return _websocket_mask_python(mask, data)
@unittest.skipIf(speedups is None, "tornado.speedups module not present")
class CythonMaskFunctionTest(MaskFunctionMixin, unittest.TestCase):
def mask(self, mask, data):
return speedups.websocket_mask(mask, data)
class ServerPeriodicPingTest(WebSocketBaseTestCase):
def get_app(self):
class PingHandler(TestWebSocketHandler):
def on_pong(self, data):
self.write_message("got pong")
self.close_future = Future()
return Application([
('/', PingHandler, dict(close_future=self.close_future)),
], websocket_ping_interval=0.01)
@gen_test
def test_server_ping(self):
ws = yield self.ws_connect('/')
for i in range(3):
response = yield ws.read_message()
self.assertEqual(response, "got pong")
yield self.close(ws)
# TODO: test that the connection gets closed if ping responses stop.
class ClientPeriodicPingTest(WebSocketBaseTestCase):
def get_app(self):
class PingHandler(TestWebSocketHandler):
def on_ping(self, data):
self.write_message("got ping")
self.close_future = Future()
return Application([
('/', PingHandler, dict(close_future=self.close_future)),
])
@gen_test
def test_client_ping(self):
ws = yield self.ws_connect('/', ping_interval=0.01)
for i in range(3):
response = yield ws.read_message()
self.assertEqual(response, "got ping")
yield self.close(ws)
# TODO: test that the connection gets closed if ping responses stop.
class MaxMessageSizeTest(WebSocketBaseTestCase):
def get_app(self):
self.close_future = Future()
return Application([
('/', EchoHandler, dict(close_future=self.close_future)),
], websocket_max_message_size=1024)
@gen_test
def test_large_message(self):
ws = yield self.ws_connect('/')
# Write a message that is allowed.
msg = 'a' * 1024
ws.write_message(msg)
resp = yield ws.read_message()
self.assertEqual(resp, msg)
# Write a message that is too large.
ws.write_message(msg + 'b')
resp = yield ws.read_message()
# A message of None means the other side closed the connection.
self.assertIs(resp, None)
self.assertEqual(ws.close_code, 1009)
self.assertEqual(ws.close_reason, "message too big")
# TODO: Needs tests of messages split over multiple
# continuation frames.
{
"repo_name": "Lancher/tornado",
"path": "tornado/test/websocket_test.py",
"copies": "2",
"size": "23022",
"license": "apache-2.0",
"hash": -3040779872807273500,
"line_mean": 34.4730354391,
"line_max": 103,
"alpha_frac": 0.6224046564,
"autogenerated": false,
"ratio": 3.996875,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.56192796564,
"avg_score": null,
"num_lines": null
}
from __future__ import absolute_import, division, print_function
import functools
import sys
import warnings
from collections import Mapping, defaultdict
from distutils.version import LooseVersion
from numbers import Number
import numpy as np
import pandas as pd
import xarray as xr
from . import (
alignment, duck_array_ops, formatting, groupby, indexing, ops, resample,
rolling, utils)
from .. import conventions
from .alignment import align
from .common import (DataWithCoords, ImplementsDatasetReduce,
_contains_datetime_like_objects)
from .coordinates import (
DatasetCoordinates, Indexes, LevelCoordinatesSource,
assert_coordinate_consistent, remap_label_indexers)
from .dtypes import is_datetime_like
from .merge import (
dataset_merge_method, dataset_update_method, merge_data_and_coords,
merge_variables)
from .options import OPTIONS
from .pycompat import (
OrderedDict, basestring, dask_array_type, integer_types, iteritems, range)
from .utils import (
Frozen, SortedKeysDict, decode_numpy_dict_values,
ensure_us_time_resolution, hashable, maybe_wrap_array)
from .variable import IndexVariable, Variable, as_variable, broadcast_variables
# list of attributes of pd.DatetimeIndex that are ndarrays of time info
_DATETIMEINDEX_COMPONENTS = ['year', 'month', 'day', 'hour', 'minute',
'second', 'microsecond', 'nanosecond', 'date',
'time', 'dayofyear', 'weekofyear', 'dayofweek',
'quarter']
def _get_virtual_variable(variables, key, level_vars=None, dim_sizes=None):
"""Get a virtual variable (e.g., 'time.year' or a MultiIndex level)
from a dict of xarray.Variable objects (if possible)
"""
if level_vars is None:
level_vars = {}
if dim_sizes is None:
dim_sizes = {}
if key in dim_sizes:
data = pd.Index(range(dim_sizes[key]), name=key)
variable = IndexVariable((key,), data)
return key, key, variable
if not isinstance(key, basestring):
raise KeyError(key)
split_key = key.split('.', 1)
if len(split_key) == 2:
ref_name, var_name = split_key
elif len(split_key) == 1:
ref_name, var_name = key, None
else:
raise KeyError(key)
if ref_name in level_vars:
dim_var = variables[level_vars[ref_name]]
ref_var = dim_var.to_index_variable().get_level_variable(ref_name)
else:
ref_var = variables[ref_name]
if var_name is None:
virtual_var = ref_var
var_name = key
else:
if _contains_datetime_like_objects(ref_var):
ref_var = xr.DataArray(ref_var)
data = getattr(ref_var.dt, var_name).data
else:
data = getattr(ref_var, var_name).data
virtual_var = Variable(ref_var.dims, data)
return ref_name, var_name, virtual_var
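# Illustrative usage via the public API (assuming a Dataset ``ds`` with a
# datetime 'time' coordinate): ``ds['time.year']`` resolves through this
# helper to a virtual variable built from ``ds['time'].dt.year``.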
def calculate_dimensions(variables):
"""Calculate the dimensions corresponding to a set of variables.
Returns dictionary mapping from dimension names to sizes. Raises ValueError
if any of the dimension sizes conflict.
"""
dims = OrderedDict()
last_used = {}
scalar_vars = set(k for k, v in iteritems(variables) if not v.dims)
for k, var in iteritems(variables):
for dim, size in zip(var.dims, var.shape):
if dim in scalar_vars:
raise ValueError('dimension %r already exists as a scalar '
'variable' % dim)
if dim not in dims:
dims[dim] = size
last_used[dim] = k
elif dims[dim] != size:
raise ValueError('conflicting sizes for dimension %r: '
'length %s on %r and length %s on %r' %
(dim, size, k, dims[dim], last_used[dim]))
return dims
def merge_indexes(
indexes, # type: Dict[Any, Union[Any, List[Any]]]
variables, # type: Dict[Any, Variable]
coord_names, # type: Set
append=False, # type: bool
):
# type: (...) -> Tuple[OrderedDict[Any, Variable], Set]
"""Merge variables into multi-indexes.
Not public API. Used in Dataset and DataArray set_index
methods.
"""
vars_to_replace = {}
vars_to_remove = []
for dim, var_names in indexes.items():
if isinstance(var_names, basestring):
var_names = [var_names]
names, labels, levels = [], [], []
current_index_variable = variables.get(dim)
for n in var_names:
var = variables[n]
if (current_index_variable is not None and
var.dims != current_index_variable.dims):
raise ValueError(
"dimension mismatch between %r %s and %r %s"
% (dim, current_index_variable.dims, n, var.dims))
if current_index_variable is not None and append:
current_index = current_index_variable.to_index()
if isinstance(current_index, pd.MultiIndex):
names.extend(current_index.names)
labels.extend(current_index.labels)
levels.extend(current_index.levels)
else:
names.append('%s_level_0' % dim)
cat = pd.Categorical(current_index.values, ordered=True)
labels.append(cat.codes)
levels.append(cat.categories)
if not len(names) and len(var_names) == 1:
idx = pd.Index(variables[var_names[0]].values)
else:
for n in var_names:
names.append(n)
var = variables[n]
cat = pd.Categorical(var.values, ordered=True)
labels.append(cat.codes)
levels.append(cat.categories)
idx = pd.MultiIndex(labels=labels, levels=levels, names=names)
vars_to_replace[dim] = IndexVariable(dim, idx)
vars_to_remove.extend(var_names)
new_variables = OrderedDict([(k, v) for k, v in iteritems(variables)
if k not in vars_to_remove])
new_variables.update(vars_to_replace)
new_coord_names = coord_names | set(vars_to_replace)
new_coord_names -= set(vars_to_remove)
return new_variables, new_coord_names
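# Illustrative usage via the public API that calls this helper (variable
# names hypothetical):
#   ds = Dataset({'v': ('x', [10, 20])},
#                coords={'a': ('x', [1, 2]), 'b': ('x', ['u', 'w'])})
#   ds.set_index(x=['a', 'b'])  # 'x' becomes a pandas MultiIndex over (a, b)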
def split_indexes(
dims_or_levels, # type: Union[Any, List[Any]]
variables, # type: Dict[Any, Variable]
coord_names, # type: Set
level_coords, # type: Dict[Any, Any]
drop=False, # type: bool
):
# type: (...) -> Tuple[OrderedDict[Any, Variable], Set]
"""Extract (multi-)indexes (levels) as variables.
Not public API. Used in Dataset and DataArray reset_index
methods.
"""
if isinstance(dims_or_levels, basestring):
dims_or_levels = [dims_or_levels]
dim_levels = defaultdict(list)
dims = []
for k in dims_or_levels:
if k in level_coords:
dim_levels[level_coords[k]].append(k)
else:
dims.append(k)
vars_to_replace = {}
vars_to_create = OrderedDict()
vars_to_remove = []
for d in dims:
index = variables[d].to_index()
if isinstance(index, pd.MultiIndex):
dim_levels[d] = index.names
else:
vars_to_remove.append(d)
if not drop:
vars_to_create[d + '_'] = Variable(d, index)
for d, levs in dim_levels.items():
index = variables[d].to_index()
if len(levs) == index.nlevels:
vars_to_remove.append(d)
else:
vars_to_replace[d] = IndexVariable(d, index.droplevel(levs))
if not drop:
for lev in levs:
idx = index.get_level_values(lev)
vars_to_create[idx.name] = Variable(d, idx)
new_variables = variables.copy()
for v in set(vars_to_remove):
del new_variables[v]
new_variables.update(vars_to_replace)
new_variables.update(vars_to_create)
new_coord_names = (coord_names | set(vars_to_create)) - set(vars_to_remove)
return new_variables, new_coord_names
def _assert_empty(args, msg='%s'):
if args:
raise ValueError(msg % args)
def as_dataset(obj):
"""Cast the given object to a Dataset.
Handles Datasets, DataArrays and dictionaries of variables. A new Dataset
object is only created if the provided object is not already one.
"""
if hasattr(obj, 'to_dataset'):
obj = obj.to_dataset()
if not isinstance(obj, Dataset):
obj = Dataset(obj)
return obj
class DataVariables(Mapping, formatting.ReprMixin):
def __init__(self, dataset):
self._dataset = dataset
def __iter__(self):
return (key for key in self._dataset._variables
if key not in self._dataset._coord_names)
def __len__(self):
return len(self._dataset._variables) - len(self._dataset._coord_names)
def __contains__(self, key):
return (key in self._dataset._variables and
key not in self._dataset._coord_names)
def __getitem__(self, key):
if key not in self._dataset._coord_names:
return self._dataset[key]
else:
raise KeyError(key)
def __unicode__(self):
return formatting.data_vars_repr(self)
@property
def variables(self):
all_variables = self._dataset.variables
return Frozen(OrderedDict((k, all_variables[k]) for k in self))
def _ipython_key_completions_(self):
"""Provide method for the key-autocompletions in IPython. """
return [key for key in self._dataset._ipython_key_completions_()
if key not in self._dataset._coord_names]
class _LocIndexer(object):
def __init__(self, dataset):
self.dataset = dataset
def __getitem__(self, key):
if not utils.is_dict_like(key):
raise TypeError('can only lookup dictionaries from Dataset.loc')
return self.dataset.sel(**key)
class Dataset(Mapping, ImplementsDatasetReduce, DataWithCoords,
formatting.ReprMixin):
"""A multi-dimensional, in memory, array database.
A dataset resembles an in-memory representation of a NetCDF file, and
consists of variables, coordinates and attributes which together form a
self-describing dataset.
Dataset implements the mapping interface with keys given by variable names
and values given by DataArray objects for each variable name.
One dimensional variables with name equal to their dimension are index
coordinates used for label based indexing.
"""
_groupby_cls = groupby.DatasetGroupBy
_rolling_cls = rolling.DatasetRolling
_resample_cls = resample.DatasetResample
def __init__(self, data_vars=None, coords=None, attrs=None,
compat='broadcast_equals'):
"""To load data from a file or file-like object, use the `open_dataset`
function.
Parameters
----------
data_vars : dict-like, optional
A mapping from variable names to :py:class:`~xarray.DataArray`
objects, :py:class:`~xarray.Variable` objects or tuples of the
form ``(dims, data[, attrs])`` which can be used as arguments to
create a new ``Variable``. Each dimension must have the same length
in all variables in which it appears.
coords : dict-like, optional
Another mapping in the same form as the `variables` argument,
except each item is saved on the dataset as a "coordinate".
These variables have an associated meaning: they describe
constant/fixed/independent quantities, unlike the
varying/measured/dependent quantities that belong in `variables`.
Coordinate values may be given by 1-dimensional arrays or scalars,
in which case `dims` do not need to be supplied: 1D arrays will be
assumed to give index values along the dimension with the same
name.
attrs : dict-like, optional
Global attributes to save on this dataset.
compat : {'broadcast_equals', 'equals', 'identical'}, optional
String indicating how to compare variables of the same name for
potential conflicts when initializing this dataset:
- 'broadcast_equals': all values must be equal when variables are
broadcast against each other to ensure common dimensions.
- 'equals': all values and dimensions must be the same.
- 'identical': all values, dimensions and attributes must be the
same.
"""
self._variables = OrderedDict()
self._coord_names = set()
self._dims = {}
self._attrs = None
self._file_obj = None
if data_vars is None:
data_vars = {}
if coords is None:
coords = {}
if data_vars is not None or coords is not None:
self._set_init_vars_and_dims(data_vars, coords, compat)
if attrs is not None:
self.attrs = attrs
self._encoding = None
self._initialized = True
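# A minimal construction sketch (names and data hypothetical, standard usage):
#   ds = Dataset(
#       data_vars={'temperature': (('x', 'y'), np.zeros((2, 3)))},
#       coords={'x': [10, 20], 'y': [1, 2, 3]},
#       attrs={'title': 'example'})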
def _set_init_vars_and_dims(self, data_vars, coords, compat):
"""Set the initial value of Dataset variables and dimensions
"""
both_data_and_coords = [k for k in data_vars if k in coords]
if both_data_and_coords:
raise ValueError('variables %r are found in both data_vars and '
'coords' % both_data_and_coords)
if isinstance(coords, Dataset):
coords = coords.variables
variables, coord_names, dims = merge_data_and_coords(
data_vars, coords, compat=compat)
self._variables = variables
self._coord_names = coord_names
self._dims = dims
@classmethod
def load_store(cls, store, decoder=None):
"""Create a new dataset from the contents of a backends.*DataStore
object
"""
variables, attributes = store.load()
if decoder:
variables, attributes = decoder(variables, attributes)
obj = cls(variables, attrs=attributes)
obj._file_obj = store
return obj
@property
def variables(self):
"""Low level interface to Dataset contents as dict of Variable objects.
This ordered dictionary is frozen to prevent mutation that could
violate Dataset invariants. It contains all variable objects
constituting the Dataset, including both data variables and
coordinates.
"""
return Frozen(self._variables)
def _attrs_copy(self):
return None if self._attrs is None else OrderedDict(self._attrs)
@property
def attrs(self):
"""Dictionary of global attributes on this dataset
"""
if self._attrs is None:
self._attrs = OrderedDict()
return self._attrs
@attrs.setter
def attrs(self, value):
self._attrs = OrderedDict(value)
@property
def encoding(self):
"""Dictionary of global encoding attributes on this dataset
"""
if self._encoding is None:
self._encoding = {}
return self._encoding
@encoding.setter
def encoding(self, value):
self._encoding = dict(value)
@property
def dims(self):
"""Mapping from dimension names to lengths.
Cannot be modified directly, but is updated when adding new variables.
Note that type of this object differs from `DataArray.dims`.
See `Dataset.sizes` and `DataArray.sizes` for consistently named
properties.
"""
return Frozen(SortedKeysDict(self._dims))
@property
def sizes(self):
"""Mapping from dimension names to lengths.
Cannot be modified directly, but is updated when adding new variables.
This is an alias for `Dataset.dims` provided for the benefit of
consistency with `DataArray.sizes`.
See also
--------
DataArray.sizes
"""
return self.dims
def load(self, **kwargs):
"""Manually trigger loading of this dataset's data from disk or a
remote source into memory and return this dataset.
Normally, it should not be necessary to call this method in user code,
because all xarray functions should either work on deferred data or
load data automatically. However, this method can be necessary when
working with many file objects on disk.
Parameters
----------
**kwargs : dict
Additional keyword arguments passed on to ``dask.array.compute``.
See Also
--------
dask.array.compute
"""
# access .data to coerce everything to numpy or dask arrays
lazy_data = {k: v._data for k, v in self.variables.items()
if isinstance(v._data, dask_array_type)}
if lazy_data:
import dask.array as da
# evaluate all the dask arrays simultaneously
evaluated_data = da.compute(*lazy_data.values(), **kwargs)
for k, data in zip(lazy_data, evaluated_data):
self.variables[k].data = data
# load everything else sequentially
for k, v in self.variables.items():
if k not in lazy_data:
v.load()
return self
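# Typical usage sketch (assumes a dask-backed dataset; file name hypothetical):
#   ds = xr.open_dataset('data.nc', chunks={'time': 100})
#   ds.load()     # loads in place and returns self
#   ds.compute()  # same computation, but returns a new dataset (see below)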
def __dask_graph__(self):
graphs = {k: v.__dask_graph__() for k, v in self.variables.items()}
graphs = {k: v for k, v in graphs.items() if v is not None}
if not graphs:
return None
else:
from dask import sharedict
return sharedict.merge(*graphs.values())
def __dask_keys__(self):
import dask
return [v.__dask_keys__() for v in self.variables.values()
if dask.is_dask_collection(v)]
@property
def __dask_optimize__(self):
import dask.array as da
return da.Array.__dask_optimize__
@property
def __dask_scheduler__(self):
import dask.array as da
return da.Array.__dask_scheduler__
def __dask_postcompute__(self):
import dask
info = [(True, k, v.__dask_postcompute__())
if dask.is_dask_collection(v) else
(False, k, v) for k, v in self._variables.items()]
return self._dask_postcompute, (info, self._coord_names, self._dims,
self._attrs, self._file_obj,
self._encoding)
def __dask_postpersist__(self):
import dask
info = [(True, k, v.__dask_postpersist__())
if dask.is_dask_collection(v) else
(False, k, v) for k, v in self._variables.items()]
return self._dask_postpersist, (info, self._coord_names, self._dims,
self._attrs, self._file_obj,
self._encoding)
@staticmethod
def _dask_postcompute(results, info, *args):
variables = OrderedDict()
results2 = list(results[::-1])
for is_dask, k, v in info:
if is_dask:
func, args2 = v
r = results2.pop()
result = func(r, *args2)
else:
result = v
variables[k] = result
final = Dataset._construct_direct(variables, *args)
return final
@staticmethod
def _dask_postpersist(dsk, info, *args):
variables = OrderedDict()
for is_dask, k, v in info:
if is_dask:
func, args2 = v
result = func(dsk, *args2)
else:
result = v
variables[k] = result
return Dataset._construct_direct(variables, *args)
def compute(self, **kwargs):
"""Manually trigger loading of this dataset's data from disk or a
remote source into memory and return a new dataset. The original is
left unaltered.
Normally, it should not be necessary to call this method in user code,
because all xarray functions should either work on deferred data or
load data automatically. However, this method can be necessary when
working with many file objects on disk.
Parameters
----------
**kwargs : dict
Additional keyword arguments passed on to ``dask.array.compute``.
See Also
--------
dask.array.compute
"""
new = self.copy(deep=False)
return new.load(**kwargs)
def _persist_inplace(self, **kwargs):
""" Persist all Dask arrays in memory """
# access .data to coerce everything to numpy or dask arrays
lazy_data = {k: v._data for k, v in self.variables.items()
if isinstance(v._data, dask_array_type)}
if lazy_data:
import dask
# evaluate all the dask arrays simultaneously
evaluated_data = dask.persist(*lazy_data.values(), **kwargs)
for k, data in zip(lazy_data, evaluated_data):
self.variables[k].data = data
return self
def persist(self, **kwargs):
""" Trigger computation, keeping data as dask arrays
This operation can be used to trigger computation on underlying dask
arrays, similar to ``.compute()``. However this operation keeps the
data as dask arrays. This is particularly useful when using the
dask.distributed scheduler and you want to load a large amount of data
into distributed memory.
Parameters
----------
**kwargs : dict
Additional keyword arguments passed on to ``dask.persist``.
See Also
--------
dask.persist
"""
new = self.copy(deep=False)
return new._persist_inplace(**kwargs)
@classmethod
def _construct_direct(cls, variables, coord_names, dims=None, attrs=None,
file_obj=None, encoding=None):
"""Shortcut around __init__ for internal use when we want to skip
costly validation
"""
obj = object.__new__(cls)
obj._variables = variables
obj._coord_names = coord_names
obj._dims = dims
obj._attrs = attrs
obj._file_obj = file_obj
obj._encoding = encoding
obj._initialized = True
return obj
__default_attrs = object()
@classmethod
def _from_vars_and_coord_names(cls, variables, coord_names, attrs=None):
dims = dict(calculate_dimensions(variables))
return cls._construct_direct(variables, coord_names, dims, attrs)
def _replace_vars_and_dims(self, variables, coord_names=None, dims=None,
attrs=__default_attrs, inplace=False):
"""Fastpath constructor for internal use.
Preserves coord names and attributes. If not provided explicitly,
dimensions are recalculated from the supplied variables.
The arguments are *not* copied when placed on the new dataset. It is up
to the caller to ensure that they have the right type and are not used
elsewhere.
Parameters
----------
variables : OrderedDict
coord_names : set or None, optional
attrs : OrderedDict or None, optional
Returns
-------
new : Dataset
"""
if dims is None:
dims = calculate_dimensions(variables)
if inplace:
self._dims = dims
self._variables = variables
if coord_names is not None:
self._coord_names = coord_names
if attrs is not self.__default_attrs:
self._attrs = attrs
obj = self
else:
if coord_names is None:
coord_names = self._coord_names.copy()
if attrs is self.__default_attrs:
attrs = self._attrs_copy()
obj = self._construct_direct(variables, coord_names, dims, attrs)
return obj
def _replace_indexes(self, indexes):
if not len(indexes):
return self
variables = self._variables.copy()
for name, idx in indexes.items():
variables[name] = IndexVariable(name, idx)
obj = self._replace_vars_and_dims(variables)
# switch from dimension to level names, if necessary
dim_names = {}
for dim, idx in indexes.items():
if not isinstance(idx, pd.MultiIndex) and idx.name != dim:
dim_names[dim] = idx.name
if dim_names:
obj = obj.rename(dim_names)
return obj
def copy(self, deep=False):
"""Returns a copy of this dataset.
If `deep=True`, a deep copy is made of each of the component variables.
Otherwise, a shallow copy of each of the component variables is made, so
that the underlying memory region of the new dataset is the same as in
the original dataset.
"""
variables = OrderedDict((k, v.copy(deep=deep))
for k, v in iteritems(self._variables))
# skip __init__ to avoid costly validation
return self._construct_direct(variables, self._coord_names.copy(),
self._dims.copy(), self._attrs_copy(),
encoding=self.encoding)
def _subset_with_all_valid_coords(self, variables, coord_names, attrs):
needed_dims = set()
for v in variables.values():
needed_dims.update(v.dims)
for k in self._coord_names:
if set(self.variables[k].dims) <= needed_dims:
variables[k] = self._variables[k]
coord_names.add(k)
dims = dict((k, self._dims[k]) for k in needed_dims)
return self._construct_direct(variables, coord_names, dims, attrs)
@property
def _level_coords(self):
"""Return a mapping of all MultiIndex levels and their corresponding
coordinate name.
"""
level_coords = OrderedDict()
for cname in self._coord_names:
var = self.variables[cname]
if var.ndim == 1 and isinstance(var, IndexVariable):
level_names = var.level_names
if level_names is not None:
dim, = var.dims
level_coords.update({lname: dim for lname in level_names})
return level_coords
def _copy_listed(self, names):
"""Create a new Dataset with the listed variables from this dataset and
all relevant coordinates. Skips all validation.
"""
variables = OrderedDict()
coord_names = set()
for name in names:
try:
variables[name] = self._variables[name]
except KeyError:
ref_name, var_name, var = _get_virtual_variable(
self._variables, name, self._level_coords, self.dims)
variables[var_name] = var
if ref_name in self._coord_names or ref_name in self.dims:
coord_names.add(var_name)
return self._subset_with_all_valid_coords(variables, coord_names,
attrs=self.attrs.copy())
def _construct_dataarray(self, name):
"""Construct a DataArray by indexing this dataset
"""
from .dataarray import DataArray
try:
variable = self._variables[name]
except KeyError:
_, name, variable = _get_virtual_variable(
self._variables, name, self._level_coords, self.dims)
coords = OrderedDict()
needed_dims = set(variable.dims)
for k in self.coords:
if set(self.variables[k].dims) <= needed_dims:
coords[k] = self.variables[k]
return DataArray(variable, coords, name=name, fastpath=True)
def __copy__(self):
return self.copy(deep=False)
def __deepcopy__(self, memo=None):
# memo does nothing but is required for compatibility with
# copy.deepcopy
return self.copy(deep=True)
@property
def _attr_sources(self):
"""List of places to look-up items for attribute-style access"""
return self._item_sources + [self.attrs]
@property
def _item_sources(self):
"""List of places to look-up items for key-completion"""
return [self.data_vars, self.coords, {d: self[d] for d in self.dims},
LevelCoordinatesSource(self)]
def __dir__(self):
# In order to suppress a deprecation warning in IPython autocompletion,
# .T is explicitly removed from __dir__. GH: issue 1675
d = super(Dataset, self).__dir__()
d.remove('T')
return d
def __contains__(self, key):
"""The 'in' operator will return true or false depending on whether
'key' is an array in the dataset or not.
"""
return key in self._variables
def __len__(self):
warnings.warn('calling len() on an xarray.Dataset will change in '
'xarray v0.11 to only include data variables, not '
'coordinates. Call len() on the Dataset.variables '
'property instead, like ``len(ds.variables)``, to '
'preserve existing behavior in a forwards compatible '
'manner.',
FutureWarning, stacklevel=2)
return len(self._variables)
def __bool__(self):
warnings.warn('casting an xarray.Dataset to a boolean will change in '
'xarray v0.11 to only include data variables, not '
'coordinates. Cast the Dataset.variables property '
'instead to preserve existing behavior in a forwards '
'compatible manner.',
FutureWarning, stacklevel=2)
return bool(self._variables)
def __iter__(self):
warnings.warn('iteration over an xarray.Dataset will change in xarray '
'v0.11 to only include data variables, not coordinates. '
'Iterate over the Dataset.variables property instead to '
'preserve existing behavior in a forwards compatible '
'manner.',
FutureWarning, stacklevel=2)
return iter(self._variables)
def __array__(self, dtype=None):
raise TypeError('cannot directly convert an xarray.Dataset into a '
'numpy array. Instead, create an xarray.DataArray '
'first, either with indexing on the Dataset or by '
'invoking the `to_array()` method.')
@property
def nbytes(self):
return sum(v.nbytes for v in self.variables.values())
@property
def loc(self):
"""Attribute for location based indexing. Only supports __getitem__,
and only when the key is a dict of the form {dim: labels}.
"""
return _LocIndexer(self)
def __getitem__(self, key):
"""Access variables or coordinates this dataset as a
:py:class:`~xarray.DataArray`.
Indexing with a list of names will return a new ``Dataset`` object.
"""
if utils.is_dict_like(key):
return self.isel(**key)
if hashable(key):
return self._construct_dataarray(key)
else:
return self._copy_listed(np.asarray(key))
def __setitem__(self, key, value):
"""Add an array to this dataset.
If value is a `DataArray`, call its `select_vars()` method, rename it
to `key` and merge the contents of the resulting dataset into this
dataset.
If value is a `Variable` object (or a tuple of the form
``(dims, data[, attrs])``), add it to this dataset as a new
variable.
"""
if utils.is_dict_like(key):
raise NotImplementedError('cannot yet use a dictionary as a key '
'to set Dataset values')
self.update({key: value})
def __delitem__(self, key):
"""Remove a variable from this dataset.
"""
del self._variables[key]
self._coord_names.discard(key)
# mutable objects should not be hashable
__hash__ = None
def _all_compat(self, other, compat_str):
"""Helper function for equals and identical"""
# some stores (e.g., scipy) do not seem to preserve order, so don't
# require matching order for equality
def compat(x, y):
return getattr(x, compat_str)(y)
return (self._coord_names == other._coord_names and
utils.dict_equiv(self._variables, other._variables,
compat=compat))
def broadcast_equals(self, other):
"""Two Datasets are broadcast equal if they are equal after
broadcasting all variables against each other.
For example, variables that are scalar in one dataset but non-scalar in
the other dataset can still be broadcast equal if the non-scalar
variable is a constant.
See Also
--------
Dataset.equals
Dataset.identical
"""
try:
return self._all_compat(other, 'broadcast_equals')
except (TypeError, AttributeError):
return False
def equals(self, other):
"""Two Datasets are equal if they have matching variables and
coordinates, all of which are equal.
Datasets can still be equal (like pandas objects) if they have NaN
values in the same locations.
This method is necessary because `v1 == v2` for ``Dataset``
does element-wise comparisons (like numpy.ndarrays).
See Also
--------
Dataset.broadcast_equals
Dataset.identical
"""
try:
return self._all_compat(other, 'equals')
except (TypeError, AttributeError):
return False
def identical(self, other):
"""Like equals, but also checks all dataset attributes and the
attributes on all variables and coordinates.
See Also
--------
Dataset.broadcast_equals
Dataset.equals
"""
try:
return (utils.dict_equiv(self.attrs, other.attrs) and
self._all_compat(other, 'identical'))
except (TypeError, AttributeError):
return False
@property
def indexes(self):
"""OrderedDict of pandas.Index objects used for label based indexing
"""
return Indexes(self._variables, self._dims)
@property
def coords(self):
"""Dictionary of xarray.DataArray objects corresponding to coordinate
variables
"""
return DatasetCoordinates(self)
@property
def data_vars(self):
"""Dictionary of xarray.DataArray objects corresponding to data variables
"""
return DataVariables(self)
def set_coords(self, names, inplace=False):
"""Given names of one or more variables, set them as coordinates
Parameters
----------
names : str or list of str
Name(s) of variables in this dataset to convert into coordinates.
inplace : bool, optional
If True, modify this dataset inplace. Otherwise, create a new
object.
Returns
-------
Dataset
"""
# TODO: allow inserting new coordinates with this method, like
# DataFrame.set_index?
# nb. check in self._variables, not self.data_vars to ensure that the
# operation is idempotent
if isinstance(names, basestring):
names = [names]
self._assert_all_in_dataset(names)
obj = self if inplace else self.copy()
obj._coord_names.update(names)
return obj
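# Usage sketch: ``ds.set_coords('station_name')`` marks an existing data
# variable as a coordinate without changing its values (name hypothetical);
# ``ds.reset_coords('station_name')`` reverses it.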
def reset_coords(self, names=None, drop=False, inplace=False):
"""Given names of coordinates, reset them to become variables
Parameters
----------
names : str or list of str, optional
Name(s) of non-index coordinates in this dataset to reset into
variables. By default, all non-index coordinates are reset.
drop : bool, optional
If True, remove coordinates instead of converting them into
variables.
inplace : bool, optional
If True, modify this dataset inplace. Otherwise, create a new
object.
Returns
-------
Dataset
"""
if names is None:
names = self._coord_names - set(self.dims)
else:
if isinstance(names, basestring):
names = [names]
self._assert_all_in_dataset(names)
bad_coords = set(names) & set(self.dims)
if bad_coords:
raise ValueError(
'cannot remove index coordinates with reset_coords: %s'
% bad_coords)
obj = self if inplace else self.copy()
obj._coord_names.difference_update(names)
if drop:
for name in names:
del obj._variables[name]
return obj
def dump_to_store(self, store, encoder=None, sync=True, encoding=None,
unlimited_dims=None, compute=True):
"""Store dataset contents to a backends.*DataStore object."""
if encoding is None:
encoding = {}
variables, attrs = conventions.encode_dataset_coordinates(self)
check_encoding = set()
for k, enc in encoding.items():
# no need to shallow copy the variable again; that already happened
# in encode_dataset_coordinates
variables[k].encoding = enc
check_encoding.add(k)
if encoder:
variables, attrs = encoder(variables, attrs)
store.store(variables, attrs, check_encoding,
unlimited_dims=unlimited_dims)
if sync:
store.sync(compute=compute)
def to_netcdf(self, path=None, mode='w', format=None, group=None,
engine=None, encoding=None, unlimited_dims=None,
compute=True):
"""Write dataset contents to a netCDF file.
Parameters
----------
path : str, Path or file-like object, optional
Path to which to save this dataset. File-like objects are only
supported by the scipy engine. If no path is provided, this
function returns the resulting netCDF file as bytes; in this case,
we need to use scipy, which does not support netCDF version 4 (the
default format becomes NETCDF3_64BIT).
mode : {'w', 'a'}, optional
Write ('w') or append ('a') mode. If mode='w', any existing file at
this location will be overwritten. If mode='a', existing variables
will be overwritten.
format : {'NETCDF4', 'NETCDF4_CLASSIC', 'NETCDF3_64BIT', 'NETCDF3_CLASSIC'}, optional
File format for the resulting netCDF file:
* NETCDF4: Data is stored in an HDF5 file, using netCDF4 API
features.
* NETCDF4_CLASSIC: Data is stored in an HDF5 file, using only
netCDF 3 compatible API features.
* NETCDF3_64BIT: 64-bit offset version of the netCDF 3 file format,
which fully supports 2+ GB files, but is only compatible with
clients linked against netCDF version 3.6.0 or later.
* NETCDF3_CLASSIC: The classic netCDF 3 file format. It does not
handle 2+ GB files very well.
All formats are supported by the netCDF4-python library.
scipy.io.netcdf only supports the last two formats.
The default format is NETCDF4 if you are saving a file to disk and
have the netCDF4-python library available. Otherwise, xarray falls
back to using scipy to write netCDF files and defaults to the
NETCDF3_64BIT format (scipy does not support netCDF4).
group : str, optional
Path to the netCDF4 group in the given file to open (only works for
format='NETCDF4'). The group(s) will be created if necessary.
engine : {'netcdf4', 'scipy', 'h5netcdf'}, optional
Engine to use when writing netCDF files. If not provided, the
default engine is chosen based on available dependencies, with a
preference for 'netcdf4' if writing to a file on disk.
encoding : dict, optional
Nested dictionary with variable names as keys and dictionaries of
variable specific encodings as values, e.g.,
``{'my_variable': {'dtype': 'int16', 'scale_factor': 0.1,
'zlib': True}, ...}``
The `h5netcdf` engine supports both the NetCDF4-style compression
encoding parameters ``{'zlib': True, 'complevel': 9}`` and the h5py
ones ``{'compression': 'gzip', 'compression_opts': 9}``.
This allows using any compression plugin installed in the HDF5
library, e.g. LZF.
unlimited_dims : sequence of str, optional
Dimension(s) that should be serialized as unlimited dimensions.
By default, no dimensions are treated as unlimited dimensions.
Note that unlimited_dims may also be set via
``dataset.encoding['unlimited_dims']``.
compute : bool, optional
If True, compute immediately; otherwise return a
``dask.delayed.Delayed`` object that can be computed later.
"""
if encoding is None:
encoding = {}
from ..backends.api import to_netcdf
return to_netcdf(self, path, mode, format=format, group=group,
engine=engine, encoding=encoding,
unlimited_dims=unlimited_dims,
compute=compute)
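# Usage sketch (file name and variable name hypothetical):
#   ds.to_netcdf('out.nc',
#                encoding={'my_variable': {'zlib': True, 'complevel': 4}})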
def to_zarr(self, store=None, mode='w-', synchronizer=None, group=None,
encoding=None, compute=True):
"""Write dataset contents to a zarr group.
.. note:: Experimental
The Zarr backend is new and experimental. Please report any
unexpected behavior via github issues.
Parameters
----------
store : MutableMapping or str, optional
Store or path to directory in file system.
mode : {'w', 'w-'}
Persistence mode: 'w' means create (overwrite if exists);
'w-' means create (fail if exists).
synchronizer : object, optional
Array synchronizer
group : str, optional
Group path. (a.k.a. `path` in zarr terminology.)
encoding : dict, optional
Nested dictionary with variable names as keys and dictionaries of
variable specific encodings as values, e.g.,
``{'my_variable': {'dtype': 'int16', 'scale_factor': 0.1,}, ...}``
compute : bool, optional
If True, compute immediately; otherwise return a
``dask.delayed.Delayed`` object that can be computed later.
"""
if encoding is None:
encoding = {}
if mode not in ['w', 'w-']:
# TODO: figure out how to handle 'r+' and 'a'
raise ValueError("The only supported options for mode are 'w' "
"and 'w-'.")
from ..backends.api import to_zarr
return to_zarr(self, store=store, mode=mode, synchronizer=synchronizer,
group=group, encoding=encoding, compute=compute)
def __unicode__(self):
return formatting.dataset_repr(self)
def info(self, buf=None):
"""
Concise summary of a Dataset's variables and attributes.
Parameters
----------
buf : writable buffer, defaults to sys.stdout
See Also
--------
pandas.DataFrame.assign
netCDF's ncdump
"""
if buf is None: # pragma: no cover
buf = sys.stdout
lines = []
lines.append(u'xarray.Dataset {')
lines.append(u'dimensions:')
for name, size in self.dims.items():
lines.append(u'\t{name} = {size} ;'.format(name=name, size=size))
lines.append(u'\nvariables:')
for name, da in self.variables.items():
dims = u', '.join(da.dims)
lines.append(u'\t{type} {name}({dims}) ;'.format(
type=da.dtype, name=name, dims=dims))
for k, v in da.attrs.items():
lines.append(u'\t\t{name}:{k} = {v} ;'.format(name=name, k=k,
v=v))
lines.append(u'\n// global attributes:')
for k, v in self.attrs.items():
lines.append(u'\t:{k} = {v} ;'.format(k=k, v=v))
lines.append(u'}')
buf.write(u'\n'.join(lines))
@property
def chunks(self):
"""Block dimensions for this dataset's data or None if it's not a dask
array.
"""
chunks = {}
for v in self.variables.values():
if v.chunks is not None:
for dim, c in zip(v.dims, v.chunks):
if dim in chunks and c != chunks[dim]:
raise ValueError('inconsistent chunks')
chunks[dim] = c
return Frozen(SortedKeysDict(chunks))
def chunk(self, chunks=None, name_prefix='xarray-', token=None,
lock=False):
"""Coerce all arrays in this dataset into dask arrays with the given
chunks.
Non-dask arrays in this dataset will be converted to dask arrays. Dask
arrays will be rechunked to the given chunk sizes.
If chunks are not provided for one or more dimensions, chunk
sizes along that dimension will not be updated; non-dask arrays will be
converted into dask arrays with a single block.
Parameters
----------
chunks : int or dict, optional
Chunk sizes along each dimension, e.g., ``5`` or
``{'x': 5, 'y': 5}``.
name_prefix : str, optional
Prefix for the name of any new dask arrays.
token : str, optional
Token uniquely identifying this dataset.
lock : optional
Passed on to :py:func:`dask.array.from_array`, if the array is not
already a dask array.
Returns
-------
chunked : xarray.Dataset
"""
try:
from dask.base import tokenize
except ImportError:
import dask # raise the usual error if dask is entirely missing # flake8: noqa
raise ImportError('xarray requires dask version 0.9 or newer')
if isinstance(chunks, Number):
chunks = dict.fromkeys(self.dims, chunks)
if chunks is not None:
bad_dims = [d for d in chunks if d not in self.dims]
if bad_dims:
raise ValueError('some chunks keys are not dimensions on this '
'object: %s' % bad_dims)
def selkeys(dict_, keys):
if dict_ is None:
return None
return dict((d, dict_[d]) for d in keys if d in dict_)
def maybe_chunk(name, var, chunks):
chunks = selkeys(chunks, var.dims)
if not chunks:
chunks = None
if var.ndim > 0:
token2 = tokenize(name, token if token else var._data)
name2 = '%s%s-%s' % (name_prefix, name, token2)
return var.chunk(chunks, name=name2, lock=lock)
else:
return var
variables = OrderedDict([(k, maybe_chunk(k, v, chunks))
for k, v in self.variables.items()])
return self._replace_vars_and_dims(variables)
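# Usage sketch: ``ds.chunk({'x': 1000})`` sets chunk sizes along 'x' only;
# ``ds.chunk(1000)`` applies the same chunk size to every dimension.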
def _validate_indexers(self, indexers):
""" Here we make sure
+ each indexer key is an existing dimension name
+ each indexer value has a valid data type
"""
from .dataarray import DataArray
invalid = [k for k in indexers if k not in self.dims]
if invalid:
raise ValueError("dimensions %r do not exist" % invalid)
# all indexers should be int, slice, np.ndarrays, or Variable
indexers_list = []
for k, v in iteritems(indexers):
if isinstance(v, integer_types + (slice, Variable)):
pass
elif isinstance(v, DataArray):
v = v.variable
elif isinstance(v, tuple):
v = as_variable(v)
elif isinstance(v, Dataset):
raise TypeError('cannot use a Dataset as an indexer')
else:
v = np.asarray(v)
indexers_list.append((k, v))
return indexers_list
def _get_indexers_coordinates(self, indexers):
""" Extract coordinates from indexers.
Returns an OrderedDict mapping from coordinate name to the
coordinate variable.
        Only coordinates with a name different from any of self.variables will
be attached.
"""
from .dataarray import DataArray
coord_list = []
for k, v in indexers.items():
if isinstance(v, DataArray):
v_coords = v.coords
if v.dtype.kind == 'b':
if v.ndim != 1: # we only support 1-d boolean array
raise ValueError(
'{:d}d-boolean array is used for indexing along '
'dimension {!r}, but only 1d boolean arrays are '
'supported.'.format(v.ndim, k))
                    # For a boolean DataArray indexer, make sure its
                    # coordinates are also indexed.
v_coords = v[v.values.nonzero()[0]].coords
coord_list.append({d: v_coords[d].variable for d in v.coords})
# we don't need to call align() explicitly, because merge_variables
# already checks for exact alignment between dimension coordinates
coords = merge_variables(coord_list)
assert_coordinate_consistent(self, coords)
attached_coords = OrderedDict()
for k, v in coords.items(): # silently drop the conflicted variables.
if k not in self._variables:
attached_coords[k] = v
return attached_coords
def isel(self, drop=False, **indexers):
"""Returns a new dataset with each array indexed along the specified
dimension(s).
This method selects values from each array using its `__getitem__`
method, except this method does not require knowing the order of
each array's dimensions.
Parameters
----------
drop : bool, optional
            If ``drop=True``, drop coordinate variables indexed by integers
instead of making them scalar.
**indexers : {dim: indexer, ...}
Keyword arguments with names matching dimensions and values given
by integers, slice objects or arrays.
            An indexer can be an integer, slice, array-like or DataArray.
If DataArrays are passed as indexers, xarray-style indexing will be
carried out. See :ref:`indexing` for the details.
Returns
-------
obj : Dataset
A new Dataset with the same contents as this dataset, except each
array and dimension is indexed by the appropriate indexers.
If indexer DataArrays have coordinates that do not conflict with
this object, then these coordinates will be attached.
In general, each array's data will be a view of the array's data
in this dataset, unless vectorized indexing was triggered by using
an array indexer, in which case the data will be a copy.
See Also
--------
Dataset.sel
DataArray.isel
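        Examples
        --------
        Illustrative sketch only; the data below is hypothetical:
        >>> ds = xr.Dataset({'foo': (('x', 'y'), np.zeros((3, 4)))})
        >>> first_row = ds.isel(x=0)              # integer indexing drops 'x'
        >>> tail = ds.isel(x=slice(1, None))      # slices keep the dimension
        >>> subset = ds.isel(x=[0, 2], y=[1, 3])  # orthogonal indexing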
"""
indexers_list = self._validate_indexers(indexers)
variables = OrderedDict()
for name, var in iteritems(self._variables):
var_indexers = {k: v for k, v in indexers_list if k in var.dims}
new_var = var.isel(**var_indexers)
if not (drop and name in var_indexers):
variables[name] = new_var
coord_names = set(variables).intersection(self._coord_names)
selected = self._replace_vars_and_dims(variables,
coord_names=coord_names)
# Extract coordinates from indexers
coord_vars = selected._get_indexers_coordinates(indexers)
variables.update(coord_vars)
coord_names = (set(variables)
.intersection(self._coord_names)
.union(coord_vars))
return self._replace_vars_and_dims(variables, coord_names=coord_names)
def sel(self, method=None, tolerance=None, drop=False, **indexers):
"""Returns a new dataset with each array indexed by tick labels
along the specified dimension(s).
In contrast to `Dataset.isel`, indexers for this method should use
labels instead of integers.
Under the hood, this method is powered by using pandas's powerful Index
objects. This makes label based indexing essentially just as fast as
using integer indexing.
It also means this method uses pandas's (well documented) logic for
indexing. This means you can use string shortcuts for datetime indexes
(e.g., '2000-01' to select all values in January 2000). It also means
that slices are treated as inclusive of both the start and stop values,
unlike normal Python indexing.
Parameters
----------
method : {None, 'nearest', 'pad'/'ffill', 'backfill'/'bfill'}, optional
Method to use for inexact matches (requires pandas>=0.16):
* None (default): only exact matches
* pad / ffill: propagate last valid index value forward
* backfill / bfill: propagate next valid index value backward
* nearest: use nearest valid index value
tolerance : optional
Maximum distance between original and new labels for inexact
            matches. The values of the index at the matching locations must
satisfy the equation ``abs(index[indexer] - target) <= tolerance``.
Requires pandas>=0.17.
drop : bool, optional
            If ``drop=True``, drop coordinate variables in `indexers` instead
of making them scalar.
**indexers : {dim: indexer, ...}
Keyword arguments with names matching dimensions and values given
by scalars, slices or arrays of tick labels. For dimensions with
multi-index, the indexer may also be a dict-like object with keys
matching index level names.
If DataArrays are passed as indexers, xarray-style indexing will be
carried out. See :ref:`indexing` for the details.
Returns
-------
obj : Dataset
A new Dataset with the same contents as this dataset, except each
variable and dimension is indexed by the appropriate indexers.
If indexer DataArrays have coordinates that do not conflict with
this object, then these coordinates will be attached.
In general, each array's data will be a view of the array's data
in this dataset, unless vectorized indexing was triggered by using
an array indexer, in which case the data will be a copy.
See Also
--------
Dataset.isel
DataArray.sel
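        Examples
        --------
        Illustrative sketch only; the labels below are hypothetical:
        >>> ds = xr.Dataset({'foo': ('x', [1, 2, 3])},
        ...                 coords={'x': [10, 20, 30]})
        >>> exact = ds.sel(x=20)                      # exact label match
        >>> nearest = ds.sel(x=22, method='nearest')  # nearest label is 20
        >>> inclusive = ds.sel(x=slice(10, 20))       # both endpoints included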
"""
pos_indexers, new_indexes = remap_label_indexers(self, method,
tolerance, **indexers)
result = self.isel(drop=drop, **pos_indexers)
return result._replace_indexes(new_indexes)
def isel_points(self, dim='points', **indexers):
# type: (...) -> Dataset
"""Returns a new dataset with each array indexed pointwise along the
specified dimension(s).
This method selects pointwise values from each array and is akin to
the NumPy indexing behavior of `arr[[0, 1], [0, 1]]`, except this
method does not require knowing the order of each array's dimensions.
Parameters
----------
dim : str or DataArray or pandas.Index or other list-like object, optional
Name of the dimension to concatenate along. If dim is provided as a
string, it must be a new dimension name, in which case it is added
along axis=0. If dim is provided as a DataArray or Index or
list-like object, its name, which must not be present in the
dataset, is used as the dimension to concatenate along and the
values are added as a coordinate.
**indexers : {dim: indexer, ...}
Keyword arguments with names matching dimensions and values given
by array-like objects. All indexers must be the same length and
1 dimensional.
Returns
-------
obj : Dataset
A new Dataset with the same contents as this dataset, except each
array and dimension is indexed by the appropriate indexers. With
pointwise indexing, the new Dataset will always be a copy of the
original.
See Also
--------
Dataset.sel
Dataset.isel
Dataset.sel_points
DataArray.isel_points
"""
        warnings.warn('Dataset.isel_points is deprecated: use Dataset.isel() '
                      'instead.', DeprecationWarning, stacklevel=2)
indexer_dims = set(indexers)
def take(variable, slices):
            # Note: remove this helper function once numpy
# supports vindex https://github.com/numpy/numpy/pull/6075
if hasattr(variable.data, 'vindex'):
# Special case for dask backed arrays to use vectorised list
# indexing
sel = variable.data.vindex[slices]
else:
# Otherwise assume backend is numpy array with 'fancy' indexing
sel = variable.data[slices]
return sel
def relevant_keys(mapping):
return [k for k, v in mapping.items()
if any(d in indexer_dims for d in v.dims)]
coords = relevant_keys(self.coords)
indexers = [(k, np.asarray(v)) for k, v in iteritems(indexers)]
indexers_dict = dict(indexers)
non_indexed_dims = set(self.dims) - indexer_dims
non_indexed_coords = set(self.coords) - set(coords)
# All the indexers should be iterables
# Check that indexers are valid dims, integers, and 1D
for k, v in indexers:
if k not in self.dims:
raise ValueError("dimension %s does not exist" % k)
if v.dtype.kind != 'i':
raise TypeError('Indexers must be integers')
if v.ndim != 1:
raise ValueError('Indexers must be 1 dimensional')
# all the indexers should have the same length
lengths = set(len(v) for k, v in indexers)
if len(lengths) > 1:
raise ValueError('All indexers must be the same length')
# Existing dimensions are not valid choices for the dim argument
if isinstance(dim, basestring):
if dim in self.dims:
# dim is an invalid string
raise ValueError('Existing dimension names are not valid '
'choices for the dim argument in sel_points')
elif hasattr(dim, 'dims'):
# dim is a DataArray or Coordinate
if dim.name in self.dims:
# dim already exists
raise ValueError('Existing dimensions are not valid choices '
'for the dim argument in sel_points')
# Set the new dim_name, and optionally the new dim coordinate
# dim is either an array-like or a string
if not utils.is_scalar(dim):
            # dim is array-like: use its name, or default to 'points',
            # and coerce it to a variable
dim_name = 'points' if not hasattr(dim, 'name') else dim.name
dim_coord = as_variable(dim, name=dim_name)
else:
# dim is a string
dim_name = dim
dim_coord = None
reordered = self.transpose(
*(list(indexer_dims) + list(non_indexed_dims)))
variables = OrderedDict()
for name, var in reordered.variables.items():
if name in indexers_dict or any(
d in indexer_dims for d in var.dims):
# slice if var is an indexer or depends on an indexed dim
slc = [indexers_dict[k]
if k in indexers_dict
else slice(None) for k in var.dims]
var_dims = [dim_name] + [d for d in var.dims
if d in non_indexed_dims]
selection = take(var, tuple(slc))
var_subset = type(var)(var_dims, selection, var.attrs)
variables[name] = var_subset
else:
# If not indexed just add it back to variables or coordinates
variables[name] = var
coord_names = (set(coords) & set(variables)) | non_indexed_coords
dset = self._replace_vars_and_dims(variables, coord_names=coord_names)
# Add the dim coord to the new dset. Must be done after creation
        # because _replace_vars_and_dims can only access existing coords,
# not add new ones
if dim_coord is not None:
dset.coords[dim_name] = dim_coord
return dset
def sel_points(self, dim='points', method=None, tolerance=None,
**indexers):
"""Returns a new dataset with each array indexed pointwise by tick
labels along the specified dimension(s).
In contrast to `Dataset.isel_points`, indexers for this method should
use labels instead of integers.
In contrast to `Dataset.sel`, this method selects points along the
diagonal of multi-dimensional arrays, not the intersection.
Parameters
----------
dim : str or DataArray or pandas.Index or other list-like object, optional
Name of the dimension to concatenate along. If dim is provided as a
string, it must be a new dimension name, in which case it is added
along axis=0. If dim is provided as a DataArray or Index or
list-like object, its name, which must not be present in the
dataset, is used as the dimension to concatenate along and the
values are added as a coordinate.
method : {None, 'nearest', 'pad'/'ffill', 'backfill'/'bfill'}, optional
Method to use for inexact matches (requires pandas>=0.16):
* None (default): only exact matches
* pad / ffill: propagate last valid index value forward
* backfill / bfill: propagate next valid index value backward
* nearest: use nearest valid index value
tolerance : optional
Maximum distance between original and new labels for inexact
            matches. The values of the index at the matching locations must
satisfy the equation ``abs(index[indexer] - target) <= tolerance``.
Requires pandas>=0.17.
**indexers : {dim: indexer, ...}
Keyword arguments with names matching dimensions and values given
by array-like objects. All indexers must be the same length and
1 dimensional.
Returns
-------
obj : Dataset
A new Dataset with the same contents as this dataset, except each
array and dimension is indexed by the appropriate indexers. With
pointwise indexing, the new Dataset will always be a copy of the
original.
See Also
--------
Dataset.sel
Dataset.isel
Dataset.isel_points
DataArray.sel_points
"""
        warnings.warn('Dataset.sel_points is deprecated: use Dataset.sel() '
                      'instead.', DeprecationWarning, stacklevel=2)
pos_indexers, _ = indexing.remap_label_indexers(
self, indexers, method=method, tolerance=tolerance
)
return self.isel_points(dim=dim, **pos_indexers)
def reindex_like(self, other, method=None, tolerance=None, copy=True):
"""Conform this object onto the indexes of another object, filling
in missing values with NaN.
Parameters
----------
other : Dataset or DataArray
Object with an 'indexes' attribute giving a mapping from dimension
names to pandas.Index objects, which provides coordinates upon
which to index the variables in this dataset. The indexes on this
other object need not be the same as the indexes on this
dataset. Any mis-matched index values will be filled in with
NaN, and any mis-matched dimension names will simply be ignored.
method : {None, 'nearest', 'pad'/'ffill', 'backfill'/'bfill'}, optional
Method to use for filling index values from other not found in this
dataset:
* None (default): don't fill gaps
* pad / ffill: propagate last valid index value forward
* backfill / bfill: propagate next valid index value backward
* nearest: use nearest valid index value (requires pandas>=0.16)
tolerance : optional
Maximum distance between original and new labels for inexact
            matches. The values of the index at the matching locations must
satisfy the equation ``abs(index[indexer] - target) <= tolerance``.
Requires pandas>=0.17.
copy : bool, optional
If ``copy=True``, data in the return value is always copied. If
``copy=False`` and reindexing is unnecessary, or can be performed
with only slice operations, then the output may share memory with
the input. In either case, a new xarray object is always returned.
Returns
-------
reindexed : Dataset
Another dataset, with this dataset's data but coordinates from the
other object.
See Also
--------
Dataset.reindex
align
"""
indexers = alignment.reindex_like_indexers(self, other)
return self.reindex(method=method, copy=copy, tolerance=tolerance,
**indexers)
def reindex(self, indexers=None, method=None, tolerance=None, copy=True,
**kw_indexers):
"""Conform this object onto a new set of indexes, filling in
missing values with NaN.
Parameters
----------
        indexers : dict, optional
            Dictionary with keys given by dimension names and values given by
            arrays of coordinate tick labels. Any mis-matched coordinate values
will be filled in with NaN, and any mis-matched dimension names will
simply be ignored.
method : {None, 'nearest', 'pad'/'ffill', 'backfill'/'bfill'}, optional
Method to use for filling index values in ``indexers`` not found in
this dataset:
* None (default): don't fill gaps
* pad / ffill: propagate last valid index value forward
* backfill / bfill: propagate next valid index value backward
* nearest: use nearest valid index value (requires pandas>=0.16)
tolerance : optional
Maximum distance between original and new labels for inexact
            matches. The values of the index at the matching locations must
satisfy the equation ``abs(index[indexer] - target) <= tolerance``.
Requires pandas>=0.17.
copy : bool, optional
If ``copy=True``, data in the return value is always copied. If
``copy=False`` and reindexing is unnecessary, or can be performed
with only slice operations, then the output may share memory with
the input. In either case, a new xarray object is always returned.
**kw_indexers : optional
Keyword arguments in the same form as ``indexers``.
Returns
-------
reindexed : Dataset
Another dataset, with this dataset's data but replaced coordinates.
See Also
--------
Dataset.reindex_like
align
pandas.Index.get_indexer
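        Examples
        --------
        Illustrative sketch only (hypothetical labels); with ``method='ffill'``
        the new label 15 takes the value at label 10:
        >>> ds = xr.Dataset({'foo': ('x', [1, 2, 3])},
        ...                 coords={'x': [10, 20, 30]})
        >>> expanded = ds.reindex(x=[10, 15, 20])  # 15 is filled with NaN
        >>> padded = ds.reindex(x=[10, 15, 20], method='ffill')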
"""
indexers = utils.combine_pos_and_kw_args(indexers, kw_indexers,
'reindex')
bad_dims = [d for d in indexers if d not in self.dims]
if bad_dims:
raise ValueError('invalid reindex dimensions: %s' % bad_dims)
variables = alignment.reindex_variables(
self.variables, self.sizes, self.indexes, indexers, method,
tolerance, copy=copy)
coord_names = set(self._coord_names)
coord_names.update(indexers)
return self._replace_vars_and_dims(variables, coord_names)
def rename(self, name_dict, inplace=False):
"""Returns a new object with renamed variables and dimensions.
Parameters
----------
name_dict : dict-like
Dictionary whose keys are current variable or dimension names and
whose values are the desired names.
inplace : bool, optional
If True, rename variables and dimensions in-place. Otherwise,
return a new dataset object.
Returns
-------
renamed : Dataset
Dataset with renamed variables and dimensions.
See Also
--------
Dataset.swap_dims
DataArray.rename
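        Examples
        --------
        Illustrative sketch only:
        >>> ds = xr.Dataset({'foo': ('x', [1, 2])}, coords={'x': [0, 1]})
        >>> renamed = ds.rename({'foo': 'bar', 'x': 'time'})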
"""
for k, v in name_dict.items():
if k not in self and k not in self.dims:
raise ValueError("cannot rename %r because it is not a "
"variable or dimension in this dataset" % k)
variables = OrderedDict()
coord_names = set()
for k, v in iteritems(self._variables):
name = name_dict.get(k, k)
dims = tuple(name_dict.get(dim, dim) for dim in v.dims)
var = v.copy(deep=False)
var.dims = dims
if name in variables:
raise ValueError('the new name %r conflicts' % (name,))
variables[name] = var
if k in self._coord_names:
coord_names.add(name)
dims = OrderedDict((name_dict.get(k, k), v)
for k, v in self.dims.items())
return self._replace_vars_and_dims(variables, coord_names, dims=dims,
inplace=inplace)
def swap_dims(self, dims_dict, inplace=False):
"""Returns a new object with swapped dimensions.
Parameters
----------
dims_dict : dict-like
Dictionary whose keys are current dimension names and whose values
are new names. Each value must already be a variable in the
dataset.
inplace : bool, optional
If True, swap dimensions in-place. Otherwise, return a new dataset
object.
Returns
-------
renamed : Dataset
Dataset with swapped dimensions.
See Also
--------
Dataset.rename
DataArray.swap_dims
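        Examples
        --------
        Illustrative sketch only; 'y' must already be a 1D variable along 'x':
        >>> ds = xr.Dataset({'foo': ('x', [1, 2])},
        ...                 coords={'x': [0, 1], 'y': ('x', ['a', 'b'])})
        >>> swapped = ds.swap_dims({'x': 'y'})  # 'y' is now the dimension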
"""
for k, v in dims_dict.items():
if k not in self.dims:
raise ValueError('cannot swap from dimension %r because it is '
'not an existing dimension' % k)
if self.variables[v].dims != (k,):
raise ValueError('replacement dimension %r is not a 1D '
'variable along the old dimension %r'
% (v, k))
result_dims = set(dims_dict.get(dim, dim) for dim in self.dims)
variables = OrderedDict()
coord_names = self._coord_names.copy()
coord_names.update(dims_dict.values())
for k, v in iteritems(self.variables):
dims = tuple(dims_dict.get(dim, dim) for dim in v.dims)
if k in result_dims:
var = v.to_index_variable()
else:
var = v.to_base_variable()
var.dims = dims
variables[k] = var
return self._replace_vars_and_dims(variables, coord_names,
inplace=inplace)
def expand_dims(self, dim, axis=None):
"""Return a new object with an additional axis (or axes) inserted at the
corresponding position in the array shape.
If dim is already a scalar coordinate, it will be promoted to a 1D
coordinate consisting of a single value.
Parameters
----------
        dim : str or sequence of str
            Dimensions to include on the new variable. New dimensions are
            inserted with length 1.
axis : integer, list (or tuple) of integers, or None
Axis position(s) where new axis is to be inserted (position(s) on
the result array). If a list (or tuple) of integers is passed,
            multiple axes are inserted. In this case, dim should be a list of
            the same length. If axis=None is passed, all the axes will
be inserted to the start of the result array.
Returns
-------
expanded : same type as caller
This object, but with an additional dimension(s).
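        Examples
        --------
        Illustrative sketch only:
        >>> ds = xr.Dataset({'foo': ('x', [1, 2, 3])})
        >>> leading = ds.expand_dims('time')            # new first axis
        >>> trailing = ds.expand_dims('time', axis=-1)  # new last axis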
"""
if isinstance(dim, int):
raise ValueError('dim should be str or sequence of strs or dict')
if isinstance(dim, basestring):
dim = [dim]
if axis is not None and not isinstance(axis, (list, tuple)):
axis = [axis]
if axis is None:
axis = list(range(len(dim)))
if len(dim) != len(axis):
raise ValueError('lengths of dim and axis should be identical.')
for d in dim:
if d in self.dims:
raise ValueError(
'Dimension {dim} already exists.'.format(dim=d))
if (d in self._variables and
not utils.is_scalar(self._variables[d])):
raise ValueError(
'{dim} already exists as coordinate or'
' variable name.'.format(dim=d))
if len(dim) != len(set(dim)):
raise ValueError('dims should not contain duplicate values.')
variables = OrderedDict()
for k, v in iteritems(self._variables):
if k not in dim:
if k in self._coord_names: # Do not change coordinates
variables[k] = v
else:
result_ndim = len(v.dims) + len(axis)
for a in axis:
if a < -result_ndim or result_ndim - 1 < a:
raise IndexError(
'Axis {a} is out of bounds of the expanded'
' dimension size {dim}.'.format(
a=a, v=k, dim=result_ndim))
axis_pos = [a if a >= 0 else result_ndim + a
for a in axis]
if len(axis_pos) != len(set(axis_pos)):
raise ValueError('axis should not contain duplicate'
' values.')
# We need to sort them to make sure `axis` equals to the
# axis positions of the result array.
zip_axis_dim = sorted(zip(axis_pos, dim))
all_dims = list(v.dims)
for a, d in zip_axis_dim:
all_dims.insert(a, d)
variables[k] = v.set_dims(all_dims)
else:
# If dims includes a label of a non-dimension coordinate,
# it will be promoted to a 1D coordinate with a single value.
variables[k] = v.set_dims(k)
return self._replace_vars_and_dims(variables, self._coord_names)
def set_index(self, append=False, inplace=False, **indexes):
"""Set Dataset (multi-)indexes using one or more existing coordinates or
variables.
Parameters
----------
append : bool, optional
If True, append the supplied index(es) to the existing index(es).
Otherwise replace the existing index(es) (default).
inplace : bool, optional
If True, set new index(es) in-place. Otherwise, return a new
Dataset object.
**indexes : {dim: index, ...}
Keyword arguments with names matching dimensions and values given
by (lists of) the names of existing coordinates or variables to set
as new (multi-)index.
Returns
-------
obj : Dataset
Another dataset, with this dataset's data but replaced coordinates.
See Also
--------
Dataset.reset_index
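        Examples
        --------
        Illustrative sketch only; 'a' and 'b' are existing coordinates
        along 'x':
        >>> ds = xr.Dataset(coords={'x': [0, 1], 'a': ('x', [3, 4]),
        ...                         'b': ('x', ['m', 'n'])})
        >>> single = ds.set_index(x='a')        # 'a' becomes the index
        >>> multi = ds.set_index(x=['a', 'b'])  # build a MultiIndex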
"""
variables, coord_names = merge_indexes(indexes, self._variables,
self._coord_names,
append=append)
return self._replace_vars_and_dims(variables, coord_names=coord_names,
inplace=inplace)
def reset_index(self, dims_or_levels, drop=False, inplace=False):
"""Reset the specified index(es) or multi-index level(s).
Parameters
----------
dims_or_levels : str or list
Name(s) of the dimension(s) and/or multi-index level(s) that will
be reset.
drop : bool, optional
If True, remove the specified indexes and/or multi-index levels
instead of extracting them as new coordinates (default: False).
inplace : bool, optional
If True, modify the dataset in-place. Otherwise, return a new
Dataset object.
Returns
-------
obj : Dataset
Another dataset, with this dataset's data but replaced coordinates.
See Also
--------
Dataset.set_index
"""
variables, coord_names = split_indexes(dims_or_levels, self._variables,
self._coord_names,
self._level_coords, drop=drop)
return self._replace_vars_and_dims(variables, coord_names=coord_names,
inplace=inplace)
def reorder_levels(self, inplace=False, **dim_order):
"""Rearrange index levels using input order.
Parameters
----------
inplace : bool, optional
If True, modify the dataset in-place. Otherwise, return a new
DataArray object.
**dim_order : optional
Keyword arguments with names matching dimensions and values given
by lists representing new level orders. Every given dimension
must have a multi-index.
Returns
-------
obj : Dataset
Another dataset, with this dataset's data but replaced
coordinates.
"""
replace_variables = {}
for dim, order in dim_order.items():
coord = self._variables[dim]
index = coord.to_index()
if not isinstance(index, pd.MultiIndex):
raise ValueError("coordinate %r has no MultiIndex" % dim)
replace_variables[dim] = IndexVariable(coord.dims,
index.reorder_levels(order))
variables = self._variables.copy()
variables.update(replace_variables)
return self._replace_vars_and_dims(variables, inplace=inplace)
def _stack_once(self, dims, new_dim):
variables = OrderedDict()
for name, var in self.variables.items():
if name not in dims:
if any(d in var.dims for d in dims):
add_dims = [d for d in dims if d not in var.dims]
vdims = list(var.dims) + add_dims
shape = [self.dims[d] for d in vdims]
exp_var = var.set_dims(vdims, shape)
stacked_var = exp_var.stack(**{new_dim: dims})
variables[name] = stacked_var
else:
variables[name] = var.copy(deep=False)
# consider dropping levels that are unused?
levels = [self.get_index(dim) for dim in dims]
if hasattr(pd, 'RangeIndex'):
# RangeIndex levels in a MultiIndex are broken for appending in
# pandas before v0.19.0
levels = [pd.Int64Index(level)
if isinstance(level, pd.RangeIndex)
else level
for level in levels]
idx = utils.multiindex_from_product_levels(levels, names=dims)
variables[new_dim] = IndexVariable(new_dim, idx)
coord_names = set(self._coord_names) - set(dims) | set([new_dim])
return self._replace_vars_and_dims(variables, coord_names)
def stack(self, **dimensions):
"""
Stack any number of existing dimensions into a single new dimension.
New dimensions will be added at the end, and the corresponding
coordinate variables will be combined into a MultiIndex.
Parameters
----------
**dimensions : keyword arguments of the form new_name=(dim1, dim2, ...)
Names of new dimensions, and the existing dimensions that they
replace.
Returns
-------
stacked : Dataset
Dataset with stacked data.
See also
--------
Dataset.unstack
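        Examples
        --------
        Illustrative sketch only:
        >>> ds = xr.Dataset({'foo': (('x', 'y'), np.zeros((2, 3)))},
        ...                 coords={'x': ['a', 'b'], 'y': [0, 1, 2]})
        >>> stacked = ds.stack(z=('x', 'y'))  # 'z' is a MultiIndex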
"""
result = self
for new_dim, dims in dimensions.items():
result = result._stack_once(dims, new_dim)
return result
def unstack(self, dim):
"""
Unstack an existing dimension corresponding to a MultiIndex into
multiple new dimensions.
New dimensions will be added at the end.
Parameters
----------
dim : str
Name of the existing dimension to unstack.
Returns
-------
unstacked : Dataset
Dataset with unstacked data.
See also
--------
Dataset.stack
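        Examples
        --------
        Illustrative sketch only, reversing a ``stack`` call:
        >>> ds = xr.Dataset({'foo': (('x', 'y'), np.zeros((2, 3)))},
        ...                 coords={'x': ['a', 'b'], 'y': [0, 1, 2]})
        >>> roundtripped = ds.stack(z=('x', 'y')).unstack('z')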
"""
if dim not in self.dims:
raise ValueError('invalid dimension: %s' % dim)
index = self.get_index(dim)
if not isinstance(index, pd.MultiIndex):
raise ValueError('cannot unstack a dimension that does not have '
'a MultiIndex')
full_idx = pd.MultiIndex.from_product(index.levels, names=index.names)
obj = self.reindex(copy=False, **{dim: full_idx})
new_dim_names = index.names
new_dim_sizes = [lev.size for lev in index.levels]
variables = OrderedDict()
for name, var in obj.variables.items():
if name != dim:
if dim in var.dims:
new_dims = OrderedDict(zip(new_dim_names, new_dim_sizes))
variables[name] = var.unstack(**{dim: new_dims})
else:
variables[name] = var
for name, lev in zip(new_dim_names, index.levels):
variables[name] = IndexVariable(name, lev)
coord_names = set(self._coord_names) - set([dim]) | set(new_dim_names)
return self._replace_vars_and_dims(variables, coord_names)
def update(self, other, inplace=True):
"""Update this dataset's variables with those from another dataset.
Parameters
----------
other : Dataset or castable to Dataset
Dataset or variables with which to update this dataset.
inplace : bool, optional
If True, merge the other dataset into this dataset in-place.
Otherwise, return a new dataset object.
Returns
-------
updated : Dataset
Updated dataset.
Raises
------
ValueError
If any dimensions would have inconsistent sizes in the updated
dataset.
"""
variables, coord_names, dims = dataset_update_method(self, other)
return self._replace_vars_and_dims(variables, coord_names, dims,
inplace=inplace)
def merge(self, other, inplace=False, overwrite_vars=frozenset(),
compat='no_conflicts', join='outer'):
"""Merge the arrays of two datasets into a single dataset.
        This method generally does not allow for overriding data, with the
of attributes, which are ignored on the second dataset. Variables with
the same name are checked for conflicts via the equals or identical
methods.
Parameters
----------
other : Dataset or castable to Dataset
Dataset or variables to merge with this dataset.
inplace : bool, optional
If True, merge the other dataset into this dataset in-place.
Otherwise, return a new dataset object.
overwrite_vars : str or sequence, optional
            If provided, update variables with these name(s) without checking for
conflicts in this dataset.
compat : {'broadcast_equals', 'equals', 'identical',
'no_conflicts'}, optional
String indicating how to compare variables of the same name for
potential conflicts:
- 'broadcast_equals': all values must be equal when variables are
broadcast against each other to ensure common dimensions.
- 'equals': all values and dimensions must be the same.
- 'identical': all values, dimensions and attributes must be the
same.
- 'no_conflicts': only values which are not null in both datasets
must be equal. The returned dataset then contains the combination
of all non-null values.
join : {'outer', 'inner', 'left', 'right', 'exact'}, optional
Method for joining ``self`` and ``other`` along shared dimensions:
- 'outer': use the union of the indexes
- 'inner': use the intersection of the indexes
- 'left': use indexes from ``self``
- 'right': use indexes from ``other``
- 'exact': error instead of aligning non-equal indexes
Returns
-------
merged : Dataset
Merged dataset.
Raises
------
MergeError
If any variables conflict (see ``compat``).
"""
variables, coord_names, dims = dataset_merge_method(
self, other, overwrite_vars=overwrite_vars, compat=compat,
join=join)
return self._replace_vars_and_dims(variables, coord_names, dims,
inplace=inplace)
def _assert_all_in_dataset(self, names, virtual_okay=False):
bad_names = set(names) - set(self._variables)
if virtual_okay:
bad_names -= self.virtual_variables
if bad_names:
raise ValueError('One or more of the specified variables '
'cannot be found in this dataset')
def drop(self, labels, dim=None):
"""Drop variables or index labels from this dataset.
Parameters
----------
labels : scalar or list of scalars
Name(s) of variables or index labels to drop.
dim : None or str, optional
Dimension along which to drop index labels. By default (if
``dim is None``), drops variables rather than index labels.
Returns
-------
dropped : Dataset
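        Examples
        --------
        Illustrative sketch only:
        >>> ds = xr.Dataset({'foo': ('x', [1, 2, 3])},
        ...                 coords={'x': [10, 20, 30]})
        >>> without_var = ds.drop('foo')                 # drop a variable
        >>> without_labels = ds.drop([10, 30], dim='x')  # drop index labels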
"""
if utils.is_scalar(labels):
labels = [labels]
if dim is None:
return self._drop_vars(labels)
else:
try:
index = self.indexes[dim]
except KeyError:
raise ValueError(
'dimension %r does not have coordinate labels' % dim)
new_index = index.drop(labels)
return self.loc[{dim: new_index}]
def _drop_vars(self, names):
self._assert_all_in_dataset(names)
drop = set(names)
variables = OrderedDict((k, v) for k, v in iteritems(self._variables)
if k not in drop)
coord_names = set(k for k in self._coord_names if k in variables)
return self._replace_vars_and_dims(variables, coord_names)
def transpose(self, *dims):
"""Return a new Dataset object with all array dimensions transposed.
Although the order of dimensions on each array will change, the dataset
dimensions themselves will remain in fixed (sorted) order.
Parameters
----------
*dims : str, optional
By default, reverse the dimensions on each array. Otherwise,
reorder the dimensions to this order.
Returns
-------
transposed : Dataset
            Each array in the dataset (including coordinates) will be
transposed to the given order.
Notes
-----
Although this operation returns a view of each array's data, it
is not lazy -- the data will be fully loaded into memory.
See Also
--------
numpy.transpose
DataArray.transpose
"""
if dims:
if set(dims) ^ set(self.dims):
raise ValueError('arguments to transpose (%s) must be '
'permuted dataset dimensions (%s)'
% (dims, tuple(self.dims)))
ds = self.copy()
for name, var in iteritems(self._variables):
var_dims = tuple(dim for dim in dims if dim in var.dims)
ds._variables[name] = var.transpose(*var_dims)
return ds
@property
def T(self):
warnings.warn('xarray.Dataset.T has been deprecated as an alias for '
'`.transpose()`. It will be removed in xarray v0.11.',
FutureWarning, stacklevel=2)
return self.transpose()
def dropna(self, dim, how='any', thresh=None, subset=None):
"""Returns a new dataset with dropped labels for missing values along
the provided dimension.
Parameters
----------
dim : str
Dimension along which to drop missing values. Dropping along
multiple dimensions simultaneously is not yet supported.
how : {'any', 'all'}, optional
* any : if any NA values are present, drop that label
* all : if all values are NA, drop that label
thresh : int, default None
If supplied, require this many non-NA values.
subset : sequence, optional
Subset of variables to check for missing values. By default, all
variables in the dataset are checked.
Returns
-------
Dataset
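        Examples
        --------
        Illustrative sketch only:
        >>> ds = xr.Dataset({'foo': ('x', [0.0, np.nan, 2.0])})
        >>> no_missing = ds.dropna('x')           # drops the label holding NaN
        >>> any_data = ds.dropna('x', how='all')  # keeps labels with any data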
"""
# TODO: consider supporting multiple dimensions? Or not, given that
# there are some ugly edge cases, e.g., pandas's dropna differs
# depending on the order of the supplied axes.
if dim not in self.dims:
raise ValueError('%s must be a single dataset dimension' % dim)
if subset is None:
subset = list(self.data_vars)
count = np.zeros(self.dims[dim], dtype=np.int64)
size = 0
for k in subset:
array = self._variables[k]
if dim in array.dims:
dims = [d for d in array.dims if d != dim]
count += np.asarray(array.count(dims))
size += np.prod([self.dims[d] for d in dims])
if thresh is not None:
mask = count >= thresh
elif how == 'any':
mask = count == size
elif how == 'all':
mask = count > 0
elif how is not None:
raise ValueError('invalid how option: %s' % how)
else:
raise TypeError('must specify how or thresh')
return self.isel(**{dim: mask})
def fillna(self, value):
"""Fill missing values in this object.
This operation follows the normal broadcasting and alignment rules that
xarray uses for binary arithmetic, except the result is aligned to this
object (``join='left'``) instead of aligned to the intersection of
index coordinates (``join='inner'``).
Parameters
----------
value : scalar, ndarray, DataArray, dict or Dataset
Used to fill all matching missing values in this dataset's data
            variables. Scalar, ndarray or DataArray arguments are used to
fill all data with aligned coordinates (for DataArrays).
Dictionaries or datasets match data variables and then align
coordinates if necessary.
Returns
-------
Dataset
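        Examples
        --------
        Illustrative sketch only:
        >>> ds = xr.Dataset({'foo': ('x', [0.0, np.nan]),
        ...                  'bar': ('x', [np.nan, 1.0])})
        >>> zeros = ds.fillna(0)                         # one value for everything
        >>> per_var = ds.fillna({'foo': -1, 'bar': -2})  # per-variable fills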
"""
if utils.is_dict_like(value):
value_keys = getattr(value, 'data_vars', value).keys()
if not set(value_keys) <= set(self.data_vars.keys()):
raise ValueError('all variables in the argument to `fillna` '
'must be contained in the original dataset')
out = ops.fillna(self, value)
return out
def interpolate_na(self, dim=None, method='linear', limit=None,
use_coordinate=True,
**kwargs):
"""Interpolate values according to different methods.
Parameters
----------
dim : str
Specifies the dimension along which to interpolate.
method : {'linear', 'nearest', 'zero', 'slinear', 'quadratic', 'cubic',
'polynomial', 'barycentric', 'krog', 'pchip',
'spline'}, optional
String indicating which method to use for interpolation:
- 'linear': linear interpolation (Default). Additional keyword
arguments are passed to ``numpy.interp``
- 'nearest', 'zero', 'slinear', 'quadratic', 'cubic',
'polynomial': are passed to ``scipy.interpolate.interp1d``. If
method=='polynomial', the ``order`` keyword argument must also be
provided.
- 'barycentric', 'krog', 'pchip', 'spline': use their respective
``scipy.interpolate`` classes.
use_coordinate : boolean or str, default True
Specifies which index to use as the x values in the interpolation
formulated as `y = f(x)`. If False, values are treated as if
            equally-spaced along `dim`. If True, the IndexVariable `dim` is
used. If use_coordinate is a string, it specifies the name of a
            coordinate variable to use as the index.
limit : int, default None
Maximum number of consecutive NaNs to fill. Must be greater than 0
or None for no limit.
Returns
-------
Dataset
See also
--------
numpy.interp
scipy.interpolate
"""
from .missing import interp_na, _apply_over_vars_with_dim
new = _apply_over_vars_with_dim(interp_na, self, dim=dim,
method=method, limit=limit,
use_coordinate=use_coordinate,
**kwargs)
return new
def ffill(self, dim, limit=None):
        '''Fill NaN values by propagating values forward
*Requires bottleneck.*
Parameters
----------
dim : str
Specifies the dimension along which to propagate values when
filling.
limit : int, default None
The maximum number of consecutive NaN values to forward fill. In
other words, if there is a gap with more than this number of
consecutive NaNs, it will only be partially filled. Must be greater
than 0 or None for no limit.
Returns
-------
Dataset
'''
from .missing import ffill, _apply_over_vars_with_dim
new = _apply_over_vars_with_dim(ffill, self, dim=dim, limit=limit)
return new
def bfill(self, dim, limit=None):
        '''Fill NaN values by propagating values backward
*Requires bottleneck.*
Parameters
----------
dim : str
Specifies the dimension along which to propagate values when
filling.
limit : int, default None
The maximum number of consecutive NaN values to backward fill. In
other words, if there is a gap with more than this number of
consecutive NaNs, it will only be partially filled. Must be greater
than 0 or None for no limit.
Returns
-------
Dataset
'''
from .missing import bfill, _apply_over_vars_with_dim
new = _apply_over_vars_with_dim(bfill, self, dim=dim, limit=limit)
return new
def combine_first(self, other):
"""Combine two Datasets, default to data_vars of self.
The new coordinates follow the normal broadcasting and alignment rules
of ``join='outer'``. Vacant cells in the expanded coordinates are
filled with np.nan.
Parameters
----------
        other : Dataset
            Used to fill all matching missing values in this dataset.
Returns
-------
        Dataset
"""
out = ops.fillna(self, other, join="outer", dataset_join="outer")
return out
def reduce(self, func, dim=None, keep_attrs=False, numeric_only=False,
allow_lazy=False, **kwargs):
"""Reduce this dataset by applying `func` along some dimension(s).
Parameters
----------
func : function
Function which can be called in the form
`f(x, axis=axis, **kwargs)` to return the result of reducing an
np.ndarray over an integer valued axis.
dim : str or sequence of str, optional
Dimension(s) over which to apply `func`. By default `func` is
applied over all dimensions.
keep_attrs : bool, optional
If True, the dataset's attributes (`attrs`) will be copied from
the original object to the new one. If False (default), the new
object will be returned without attributes.
numeric_only : bool, optional
If True, only apply ``func`` to variables with a numeric dtype.
**kwargs : dict
Additional keyword arguments passed on to ``func``.
Returns
-------
reduced : Dataset
Dataset with this object's DataArrays replaced with new DataArrays
of summarized data and the indicated dimension(s) removed.
"""
if isinstance(dim, basestring):
dims = set([dim])
elif dim is None:
dims = set(self.dims)
else:
dims = set(dim)
missing_dimensions = [dim for dim in dims if dim not in self.dims]
if missing_dimensions:
raise ValueError('Dataset does not contain the dimensions: %s'
% missing_dimensions)
variables = OrderedDict()
for name, var in iteritems(self._variables):
reduce_dims = [dim for dim in var.dims if dim in dims]
if name in self.coords:
if not reduce_dims:
variables[name] = var
else:
if (not numeric_only or
np.issubdtype(var.dtype, np.number) or
(var.dtype == np.bool_)):
if len(reduce_dims) == 1:
# unpack dimensions for the benefit of functions
# like np.argmin which can't handle tuple arguments
reduce_dims, = reduce_dims
elif len(reduce_dims) == var.ndim:
# prefer to aggregate over axis=None rather than
# axis=(0, 1) if they will be equivalent, because
# the former is often more efficient
reduce_dims = None
variables[name] = var.reduce(func, dim=reduce_dims,
keep_attrs=keep_attrs,
allow_lazy=allow_lazy,
**kwargs)
coord_names = set(k for k in self.coords if k in variables)
attrs = self.attrs if keep_attrs else None
return self._replace_vars_and_dims(variables, coord_names, attrs=attrs)
def apply(self, func, keep_attrs=False, args=(), **kwargs):
"""Apply a function over the data variables in this dataset.
Parameters
----------
func : function
Function which can be called in the form `f(x, **kwargs)` to
transform each DataArray `x` in this dataset into another
DataArray.
keep_attrs : bool, optional
If True, the dataset's attributes (`attrs`) will be copied from
the original object to the new one. If False, the new object will
be returned without attributes.
args : tuple, optional
Positional arguments passed on to `func`.
**kwargs : dict
Keyword arguments passed on to `func`.
Returns
-------
applied : Dataset
Resulting dataset from applying ``func`` over each data variable.
Examples
--------
>>> da = xr.DataArray(np.random.randn(2, 3))
>>> ds = xr.Dataset({'foo': da, 'bar': ('x', [-1, 2])})
>>> ds
<xarray.Dataset>
Dimensions: (dim_0: 2, dim_1: 3, x: 2)
Dimensions without coordinates: dim_0, dim_1, x
Data variables:
foo (dim_0, dim_1) float64 -0.3751 -1.951 -1.945 0.2948 0.711 -0.3948
bar (x) int64 -1 2
>>> ds.apply(np.fabs)
<xarray.Dataset>
Dimensions: (dim_0: 2, dim_1: 3, x: 2)
Dimensions without coordinates: dim_0, dim_1, x
Data variables:
foo (dim_0, dim_1) float64 0.3751 1.951 1.945 0.2948 0.711 0.3948
bar (x) float64 1.0 2.0
"""
variables = OrderedDict(
(k, maybe_wrap_array(v, func(v, *args, **kwargs)))
for k, v in iteritems(self.data_vars))
attrs = self.attrs if keep_attrs else None
return type(self)(variables, attrs=attrs)
def assign(self, **kwargs):
"""Assign new data variables to a Dataset, returning a new object
with all the original variables in addition to the new ones.
Parameters
----------
kwargs : keyword, value pairs
keywords are the variables names. If the values are callable, they
are computed on the Dataset and assigned to new data variables. If
the values are not callable, (e.g. a DataArray, scalar, or array),
they are simply assigned.
Returns
-------
ds : Dataset
A new Dataset with the new variables in addition to all the
existing variables.
Notes
-----
Since ``kwargs`` is a dictionary, the order of your arguments may not
be preserved, and so the order of the new variables is not well
defined. Assigning multiple variables within the same ``assign`` is
possible, but you cannot reference other variables created within the
same ``assign`` call.
See Also
--------
pandas.DataFrame.assign
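        Examples
        --------
        Illustrative sketch only:
        >>> ds = xr.Dataset({'temp_c': ('x', [10.0, 20.0])})
        >>> with_f = ds.assign(temp_f=lambda d: d.temp_c * 9 / 5 + 32)
        >>> with_tuple = ds.assign(pressure=('x', [1.0, 2.0]))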
"""
data = self.copy()
# do all calculations first...
results = data._calc_assign_results(kwargs)
# ... and then assign
data.update(results)
return data
def to_array(self, dim='variable', name=None):
"""Convert this dataset into an xarray.DataArray
The data variables of this dataset will be broadcast against each other
and stacked along the first axis of the new array. All coordinates of
this dataset will remain coordinates.
Parameters
----------
dim : str, optional
Name of the new dimension.
name : str, optional
Name of the new data array.
Returns
-------
array : xarray.DataArray
"""
from .dataarray import DataArray
data_vars = [self.variables[k] for k in self.data_vars]
broadcast_vars = broadcast_variables(*data_vars)
data = duck_array_ops.stack([b.data for b in broadcast_vars], axis=0)
coords = dict(self.coords)
coords[dim] = list(self.data_vars)
dims = (dim,) + broadcast_vars[0].dims
return DataArray(data, coords, dims, attrs=self.attrs, name=name)
def _to_dataframe(self, ordered_dims):
columns = [k for k in self.variables if k not in self.dims]
data = [self._variables[k].set_dims(ordered_dims).values.reshape(-1)
for k in columns]
index = self.coords.to_index(ordered_dims)
return pd.DataFrame(OrderedDict(zip(columns, data)), index=index)
def to_dataframe(self):
"""Convert this dataset into a pandas.DataFrame.
Non-index variables in this dataset form the columns of the
        DataFrame. The DataFrame is indexed by the Cartesian product of
this dataset's indices.
"""
return self._to_dataframe(self.dims)
@classmethod
def from_dataframe(cls, dataframe):
"""Convert a pandas.DataFrame into an xarray.Dataset
Each column will be converted into an independent variable in the
Dataset. If the dataframe's index is a MultiIndex, it will be expanded
into a tensor product of one-dimensional indices (filling in missing
values with NaN). This method will produce a Dataset very similar to
that on which the 'to_dataframe' method was called, except with
possibly redundant dimensions (since all dataset variables will have
the same dimensionality).
"""
# TODO: Add an option to remove dimensions along which the variables
# are constant, to enable consistent serialization to/from a dataframe,
# even if some variables have different dimensionality.
if not dataframe.columns.is_unique:
raise ValueError(
'cannot convert DataFrame with non-unique columns')
idx = dataframe.index
obj = cls()
if isinstance(idx, pd.MultiIndex):
# it's a multi-index
# expand the DataFrame to include the product of all levels
full_idx = pd.MultiIndex.from_product(idx.levels, names=idx.names)
dataframe = dataframe.reindex(full_idx)
dims = [name if name is not None else 'level_%i' % n
for n, name in enumerate(idx.names)]
for dim, lev in zip(dims, idx.levels):
obj[dim] = (dim, lev)
shape = [lev.size for lev in idx.levels]
else:
dims = (idx.name if idx.name is not None else 'index',)
obj[dims[0]] = (dims, idx)
shape = -1
for name, series in iteritems(dataframe):
data = np.asarray(series).reshape(shape)
obj[name] = (dims, data)
return obj
def to_dask_dataframe(self, dim_order=None, set_index=False):
"""
Convert this dataset into a dask.dataframe.DataFrame.
The dimensions, coordinates and data variables in this dataset form
the columns of the DataFrame.
Parameters
----------
dim_order : list, optional
Hierarchical dimension order for the resulting dataframe. All
arrays are transposed to this order and then written out as flat
vectors in contiguous order, so the last dimension in this list
will be contiguous in the resulting DataFrame. This has a major
influence on which operations are efficient on the resulting dask
dataframe.
If provided, must include all dimensions on this dataset. By
default, dimensions are sorted alphabetically.
set_index : bool, optional
If set_index=True, the dask DataFrame is indexed by this dataset's
            coordinate. Since dask DataFrames do not support multi-indexes,
set_index only works if the dataset only contains one dimension.
Returns
-------
dask.dataframe.DataFrame
"""
import dask.array as da
import dask.dataframe as dd
if dim_order is None:
dim_order = list(self.dims)
elif set(dim_order) != set(self.dims):
raise ValueError(
'dim_order {} does not match the set of dimensions on this '
'Dataset: {}'.format(dim_order, list(self.dims)))
ordered_dims = OrderedDict((k, self.dims[k]) for k in dim_order)
columns = list(ordered_dims)
columns.extend(k for k in self.coords if k not in self.dims)
columns.extend(self.data_vars)
series_list = []
for name in columns:
try:
var = self.variables[name]
except KeyError:
# dimension without a matching coordinate
size = self.dims[name]
data = da.arange(size, chunks=size, dtype=np.int64)
var = Variable((name,), data)
# IndexVariable objects have a dummy .chunk() method
if isinstance(var, IndexVariable):
var = var.to_base_variable()
dask_array = var.set_dims(ordered_dims).chunk(self.chunks).data
series = dd.from_array(dask_array.reshape(-1), columns=[name])
series_list.append(series)
df = dd.concat(series_list, axis=1)
if set_index:
if len(dim_order) == 1:
(dim,) = dim_order
df = df.set_index(dim)
else:
# triggers an error about multi-indexes, even if only one
# dimension is passed
df = df.set_index(dim_order)
return df
def to_dict(self):
"""
Convert this dataset to a dictionary following xarray naming
conventions.
        Converts all variables and attributes to native Python objects.
        Useful for converting to JSON. To avoid datetime incompatibility,
        use the decode_times=False kwarg in xarray.open_dataset.
See also
--------
Dataset.from_dict
"""
d = {'coords': {}, 'attrs': decode_numpy_dict_values(self.attrs),
'dims': dict(self.dims), 'data_vars': {}}
for k in self.coords:
data = ensure_us_time_resolution(self[k].values).tolist()
d['coords'].update({
k: {'data': data,
'dims': self[k].dims,
'attrs': decode_numpy_dict_values(self[k].attrs)}})
for k in self.data_vars:
data = ensure_us_time_resolution(self[k].values).tolist()
d['data_vars'].update({
k: {'data': data,
'dims': self[k].dims,
'attrs': decode_numpy_dict_values(self[k].attrs)}})
return d
@classmethod
def from_dict(cls, d):
"""
Convert a dictionary into an xarray.Dataset.
Input dict can take several forms::
d = {'t': {'dims': ('t'), 'data': t},
'a': {'dims': ('t'), 'data': x},
'b': {'dims': ('t'), 'data': y}}
d = {'coords': {'t': {'dims': 't', 'data': t,
'attrs': {'units':'s'}}},
'attrs': {'title': 'air temperature'},
'dims': 't',
'data_vars': {'a': {'dims': 't', 'data': x, },
'b': {'dims': 't', 'data': y}}}
        where 't' is the name of the dimension, 'a' and 'b' are names of data
variables and t, x, and y are lists, numpy.arrays or pandas objects.
Parameters
----------
d : dict, with a minimum structure of {'var_0': {'dims': [..], \
'data': [..]}, \
...}
Returns
-------
obj : xarray.Dataset
See also
--------
Dataset.to_dict
DataArray.from_dict
"""
if not set(['coords', 'data_vars']).issubset(set(d)):
variables = d.items()
else:
import itertools
variables = itertools.chain(d.get('coords', {}).items(),
d.get('data_vars', {}).items())
try:
variable_dict = OrderedDict([(k, (v['dims'],
v['data'],
v.get('attrs'))) for
k, v in variables])
except KeyError as e:
raise ValueError(
"cannot convert dict without the key "
"'{dims_data}'".format(dims_data=str(e.args[0])))
obj = cls(variable_dict)
# what if coords aren't dims?
coords = set(d.get('coords', {})) - set(d.get('dims', {}))
obj = obj.set_coords(coords)
obj.attrs.update(d.get('attrs', {}))
return obj
@staticmethod
def _unary_op(f, keep_attrs=False):
@functools.wraps(f)
def func(self, *args, **kwargs):
ds = self.coords.to_dataset()
for k in self.data_vars:
ds._variables[k] = f(self._variables[k], *args, **kwargs)
if keep_attrs:
ds._attrs = self._attrs
return ds
return func
@staticmethod
def _binary_op(f, reflexive=False, join=None):
@functools.wraps(f)
def func(self, other):
if isinstance(other, groupby.GroupBy):
return NotImplemented
align_type = OPTIONS['arithmetic_join'] if join is None else join
if hasattr(other, 'indexes'):
self, other = align(self, other, join=align_type, copy=False)
g = f if not reflexive else lambda x, y: f(y, x)
ds = self._calculate_binary_op(g, other, join=align_type)
return ds
return func
@staticmethod
def _inplace_binary_op(f):
@functools.wraps(f)
def func(self, other):
if isinstance(other, groupby.GroupBy):
raise TypeError('in-place operations between a Dataset and '
'a grouped object are not permitted')
# we don't actually modify arrays in-place with in-place Dataset
# arithmetic -- this lets us automatically align things
if hasattr(other, 'indexes'):
other = other.reindex_like(self, copy=False)
g = ops.inplace_to_noninplace_op(f)
ds = self._calculate_binary_op(g, other, inplace=True)
self._replace_vars_and_dims(ds._variables, ds._coord_names,
attrs=ds._attrs, inplace=True)
return self
return func
def _calculate_binary_op(self, f, other, join='inner',
inplace=False):
def apply_over_both(lhs_data_vars, rhs_data_vars, lhs_vars, rhs_vars):
if inplace and set(lhs_data_vars) != set(rhs_data_vars):
raise ValueError('datasets must have the same data variables '
'for in-place arithmetic operations: %s, %s'
% (list(lhs_data_vars), list(rhs_data_vars)))
dest_vars = OrderedDict()
for k in lhs_data_vars:
if k in rhs_data_vars:
dest_vars[k] = f(lhs_vars[k], rhs_vars[k])
elif join in ["left", "outer"]:
dest_vars[k] = f(lhs_vars[k], np.nan)
for k in rhs_data_vars:
if k not in dest_vars and join in ["right", "outer"]:
dest_vars[k] = f(rhs_vars[k], np.nan)
return dest_vars
if utils.is_dict_like(other) and not isinstance(other, Dataset):
# can't use our shortcut of doing the binary operation with
# Variable objects, so apply over our data vars instead.
new_data_vars = apply_over_both(self.data_vars, other,
self.data_vars, other)
return Dataset(new_data_vars)
other_coords = getattr(other, 'coords', None)
ds = self.coords.merge(other_coords)
if isinstance(other, Dataset):
new_vars = apply_over_both(self.data_vars, other.data_vars,
self.variables, other.variables)
else:
other_variable = getattr(other, 'variable', other)
new_vars = OrderedDict((k, f(self.variables[k], other_variable))
for k in self.data_vars)
ds._variables.update(new_vars)
ds._dims = calculate_dimensions(ds._variables)
return ds
def _copy_attrs_from(self, other):
self.attrs = other.attrs
for v in other.variables:
if v in self.variables:
self.variables[v].attrs = other.variables[v].attrs
def diff(self, dim, n=1, label='upper'):
"""Calculate the n-th order discrete difference along given axis.
Parameters
----------
dim : str, optional
Dimension over which to calculate the finite difference.
n : int, optional
The number of times values are differenced.
label : str, optional
The new coordinate in dimension ``dim`` will have the
values of either the minuend's or subtrahend's coordinate
for values 'upper' and 'lower', respectively. Other
values are not supported.
Returns
-------
difference : same type as caller
The n-th order finite difference of this object.
Examples
--------
>>> ds = xr.Dataset({'foo': ('x', [5, 5, 6, 6])})
>>> ds.diff('x')
<xarray.Dataset>
Dimensions: (x: 3)
Coordinates:
* x (x) int64 1 2 3
Data variables:
foo (x) int64 0 1 0
>>> ds.diff('x', 2)
<xarray.Dataset>
Dimensions: (x: 2)
Coordinates:
* x (x) int64 2 3
Data variables:
foo (x) int64 1 -1
"""
if n == 0:
return self
if n < 0:
raise ValueError('order `n` must be non-negative but got {0}'
''.format(n))
# prepare slices
kwargs_start = {dim: slice(None, -1)}
kwargs_end = {dim: slice(1, None)}
# prepare new coordinate
if label == 'upper':
kwargs_new = kwargs_end
elif label == 'lower':
kwargs_new = kwargs_start
else:
raise ValueError('The \'label\' argument has to be either '
'\'upper\' or \'lower\'')
variables = OrderedDict()
for name, var in iteritems(self.variables):
if dim in var.dims:
if name in self.data_vars:
variables[name] = (var.isel(**kwargs_end) -
var.isel(**kwargs_start))
else:
variables[name] = var.isel(**kwargs_new)
else:
variables[name] = var
difference = self._replace_vars_and_dims(variables)
if n > 1:
return difference.diff(dim, n - 1)
else:
return difference
def shift(self, **shifts):
"""Shift this dataset by an offset along one or more dimensions.
Only data variables are moved; coordinates stay in place. This is
consistent with the behavior of ``shift`` in pandas.
Parameters
----------
**shifts : keyword arguments of the form {dim: offset}
Integer offset to shift along each of the given dimensions.
Positive offsets shift to the right; negative offsets shift to the
left.
Returns
-------
shifted : Dataset
Dataset with the same coordinates and attributes but shifted data
variables.
See also
--------
roll
Examples
--------
>>> ds = xr.Dataset({'foo': ('x', list('abcde'))})
>>> ds.shift(x=2)
<xarray.Dataset>
Dimensions: (x: 5)
Coordinates:
* x (x) int64 0 1 2 3 4
Data variables:
foo (x) object nan nan 'a' 'b' 'c'
"""
invalid = [k for k in shifts if k not in self.dims]
if invalid:
raise ValueError("dimensions %r do not exist" % invalid)
variables = OrderedDict()
for name, var in iteritems(self.variables):
if name in self.data_vars:
var_shifts = dict((k, v) for k, v in shifts.items()
if k in var.dims)
variables[name] = var.shift(**var_shifts)
else:
variables[name] = var
return self._replace_vars_and_dims(variables)
def roll(self, **shifts):
"""Roll this dataset by an offset along one or more dimensions.
Unlike shift, roll rotates all variables, including coordinates. The
direction of rotation is consistent with :py:func:`numpy.roll`.
Parameters
----------
**shifts : keyword arguments of the form {dim: offset}
Integer offset to rotate each of the given dimensions. Positive
offsets roll to the right; negative offsets roll to the left.
Returns
-------
rolled : Dataset
Dataset with the same coordinates and attributes but rolled
variables.
See also
--------
shift
Examples
--------
>>> ds = xr.Dataset({'foo': ('x', list('abcde'))})
>>> ds.roll(x=2)
<xarray.Dataset>
Dimensions: (x: 5)
Coordinates:
* x (x) int64 3 4 0 1 2
Data variables:
foo (x) object 'd' 'e' 'a' 'b' 'c'
"""
invalid = [k for k in shifts if k not in self.dims]
if invalid:
raise ValueError("dimensions %r do not exist" % invalid)
variables = OrderedDict()
for name, var in iteritems(self.variables):
var_shifts = dict((k, v) for k, v in shifts.items()
if k in var.dims)
variables[name] = var.roll(**var_shifts)
return self._replace_vars_and_dims(variables)
def sortby(self, variables, ascending=True):
"""
Sort object by labels or values (along an axis).
        Sorts the dataset, either along specified dimensions,
        or according to values of 1-D dataarrays that share a dimension
        with the calling object.
        If the input variables are dataarrays, then the dataarrays are aligned
        (via left-join) to the calling object prior to sorting by cell values.
        NaNs are sorted to the end, following the NumPy convention.
        If multiple sorts along the same dimension are
        given, numpy's lexsort is performed along that dimension:
        https://docs.scipy.org/doc/numpy/reference/generated/numpy.lexsort.html
        and the FIRST key in the sequence is used as the primary sort key,
        followed by the 2nd key, and so on.
Parameters
----------
variables: str, DataArray, or list of either
1D DataArray objects or name(s) of 1D variable(s) in
coords/data_vars whose values are used to sort the dataset.
ascending: boolean, optional
Whether to sort by ascending or descending order.
Returns
-------
sorted: Dataset
A new dataset where all the specified dims are sorted by dim
labels.
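        Examples
        --------
        A minimal illustrative sketch (assumes ``import xarray as xr``;
        results are assigned so no repr output is shown):
        >>> ds = xr.Dataset({'a': ('x', [3, 1, 2])},
        ...                 coords={'x': [10, 20, 30]})
        >>> by_values = ds.sortby('a')                 # reorder 'x' so 'a' ascends
        >>> descending = ds.sortby('a', ascending=False)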
"""
from .dataarray import DataArray
        if not isinstance(variables, list):
            variables = [variables]
variables = [v if isinstance(v, DataArray) else self[v]
for v in variables]
aligned_vars = align(self, *variables, join='left')
aligned_self = aligned_vars[0]
aligned_other_vars = aligned_vars[1:]
vars_by_dim = defaultdict(list)
for data_array in aligned_other_vars:
if data_array.ndim != 1:
raise ValueError("Input DataArray is not 1-D.")
if (data_array.dtype == object and
LooseVersion(np.__version__) < LooseVersion('1.11.0')):
raise NotImplementedError(
'sortby uses np.lexsort under the hood, which requires '
'numpy 1.11.0 or later to support object data-type.')
(key,) = data_array.dims
vars_by_dim[key].append(data_array)
indices = {}
for key, arrays in vars_by_dim.items():
order = np.lexsort(tuple(reversed(arrays)))
indices[key] = order if ascending else order[::-1]
return aligned_self.isel(**indices)
def quantile(self, q, dim=None, interpolation='linear',
numeric_only=False, keep_attrs=False):
"""Compute the qth quantile of the data along the specified dimension.
        Returns the qth quantile(s) of the array elements for each variable
        in the Dataset.
Parameters
----------
q : float in range of [0,1] (or sequence of floats)
Quantile to compute, which must be between 0 and 1 inclusive.
dim : str or sequence of str, optional
Dimension(s) over which to apply quantile.
interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
This optional parameter specifies the interpolation method to
use when the desired quantile lies between two data points
``i < j``:
* linear: ``i + (j - i) * fraction``, where ``fraction`` is
the fractional part of the index surrounded by ``i`` and
``j``.
* lower: ``i``.
* higher: ``j``.
* nearest: ``i`` or ``j``, whichever is nearest.
* midpoint: ``(i + j) / 2``.
keep_attrs : bool, optional
If True, the dataset's attributes (`attrs`) will be copied from
the original object to the new one. If False (default), the new
object will be returned without attributes.
numeric_only : bool, optional
If True, only apply ``func`` to variables with a numeric dtype.
Returns
-------
quantiles : Dataset
            If `q` is a single quantile, then the result is a scalar for each
            variable in data_vars. If multiple quantiles are given, the first
            axis of the result corresponds to the quantile and a quantile
            dimension is added to the returned Dataset. The other dimensions
            are the dimensions that remain after the reduction of the array.
See Also
--------
numpy.nanpercentile, pandas.Series.quantile, DataArray.quantile
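        Examples
        --------
        A minimal illustrative sketch (assumes ``import xarray as xr``;
        repr output omitted):
        >>> ds = xr.Dataset({'a': ('x', [0., 1., 2., 3.])})
        >>> median = ds.quantile(0.5)                       # scalar per variable
        >>> quartiles = ds.quantile([0.25, 0.75], dim='x')  # adds a 'quantile' dim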
"""
if isinstance(dim, basestring):
dims = set([dim])
elif dim is None:
dims = set(self.dims)
else:
dims = set(dim)
_assert_empty([dim for dim in dims if dim not in self.dims],
'Dataset does not contain the dimensions: %s')
q = np.asarray(q, dtype=np.float64)
variables = OrderedDict()
for name, var in iteritems(self.variables):
reduce_dims = [dim for dim in var.dims if dim in dims]
if reduce_dims or not var.dims:
if name not in self.coords:
if (not numeric_only or
np.issubdtype(var.dtype, np.number) or
var.dtype == np.bool_):
if len(reduce_dims) == var.ndim:
# prefer to aggregate over axis=None rather than
# axis=(0, 1) if they will be equivalent, because
# the former is often more efficient
reduce_dims = None
variables[name] = var.quantile(
q, dim=reduce_dims, interpolation=interpolation)
else:
variables[name] = var
# construct the new dataset
coord_names = set(k for k in self.coords if k in variables)
attrs = self.attrs if keep_attrs else None
new = self._replace_vars_and_dims(variables, coord_names, attrs=attrs)
if 'quantile' in new.dims:
new.coords['quantile'] = Variable('quantile', q)
else:
new.coords['quantile'] = q
return new
def rank(self, dim, pct=False, keep_attrs=False):
"""Ranks the data.
Equal values are assigned a rank that is the average of the ranks that
would have been otherwise assigned to all of the values within that set.
Ranks begin at 1, not 0. If pct is True, computes percentage ranks.
NaNs in the input array are returned as NaNs.
The `bottleneck` library is required.
Parameters
----------
dim : str
Dimension over which to compute rank.
pct : bool, optional
If True, compute percentage ranks, otherwise compute integer ranks.
keep_attrs : bool, optional
If True, the dataset's attributes (`attrs`) will be copied from
the original object to the new one. If False (default), the new
object will be returned without attributes.
Returns
-------
ranked : Dataset
Variables that do not depend on `dim` are dropped.
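        Examples
        --------
        Illustrative sketch only (requires the optional ``bottleneck``
        dependency; repr output omitted):
        >>> ds = xr.Dataset({'a': ('x', [3., 1., 2.])})
        >>> ranks = ds.rank('x')                # a -> [3., 1., 2.]
        >>> pct_ranks = ds.rank('x', pct=True)  # percentage ranks instead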
"""
if dim not in self.dims:
raise ValueError(
'Dataset does not contain the dimension: %s' % dim)
variables = OrderedDict()
for name, var in iteritems(self.variables):
if name in self.data_vars:
if dim in var.dims:
variables[name] = var.rank(dim, pct=pct)
else:
variables[name] = var
coord_names = set(self.coords)
attrs = self.attrs if keep_attrs else None
return self._replace_vars_and_dims(variables, coord_names, attrs=attrs)
@property
def real(self):
return self._unary_op(lambda x: x.real, keep_attrs=True)(self)
@property
def imag(self):
return self._unary_op(lambda x: x.imag, keep_attrs=True)(self)
def filter_by_attrs(self, **kwargs):
"""Returns a ``Dataset`` with variables that match specific conditions.
        Can pass in ``key=value`` or ``key=callable``. Variables are returned
        that contain all of the matches or for which the callable returns
        True. If using a callable, note that it should accept a single
        parameter only, the attribute value.
Parameters
----------
**kwargs : key=value
key : str
Attribute name.
value : callable or obj
If value is a callable, it should return a boolean in the form
of bool = func(attr) where attr is da.attrs[key].
                Otherwise, value will be compared to each
                DataArray's attrs[key].
Returns
-------
new : Dataset
New dataset with variables filtered by attribute.
Examples
--------
>>> # Create an example dataset:
>>> import numpy as np
>>> import pandas as pd
>>> import xarray as xr
>>> temp = 15 + 8 * np.random.randn(2, 2, 3)
>>> precip = 10 * np.random.rand(2, 2, 3)
>>> lon = [[-99.83, -99.32], [-99.79, -99.23]]
>>> lat = [[42.25, 42.21], [42.63, 42.59]]
>>> dims = ['x', 'y', 'time']
>>> temp_attr = dict(standard_name='air_potential_temperature')
>>> precip_attr = dict(standard_name='convective_precipitation_flux')
>>> ds = xr.Dataset({
... 'temperature': (dims, temp, temp_attr),
... 'precipitation': (dims, precip, precip_attr)},
... coords={
... 'lon': (['x', 'y'], lon),
... 'lat': (['x', 'y'], lat),
... 'time': pd.date_range('2014-09-06', periods=3),
... 'reference_time': pd.Timestamp('2014-09-05')})
>>> # Get variables matching a specific standard_name.
>>> ds.filter_by_attrs(standard_name='convective_precipitation_flux')
<xarray.Dataset>
Dimensions: (time: 3, x: 2, y: 2)
Coordinates:
* x (x) int64 0 1
* time (time) datetime64[ns] 2014-09-06 2014-09-07 2014-09-08
lat (x, y) float64 42.25 42.21 42.63 42.59
* y (y) int64 0 1
reference_time datetime64[ns] 2014-09-05
lon (x, y) float64 -99.83 -99.32 -99.79 -99.23
Data variables:
precipitation (x, y, time) float64 4.178 2.307 6.041 6.046 0.06648 ...
>>> # Get all variables that have a standard_name attribute.
>>> standard_name = lambda v: v is not None
>>> ds.filter_by_attrs(standard_name=standard_name)
<xarray.Dataset>
Dimensions: (time: 3, x: 2, y: 2)
Coordinates:
lon (x, y) float64 -99.83 -99.32 -99.79 -99.23
lat (x, y) float64 42.25 42.21 42.63 42.59
* x (x) int64 0 1
* y (y) int64 0 1
* time (time) datetime64[ns] 2014-09-06 2014-09-07 2014-09-08
reference_time datetime64[ns] 2014-09-05
Data variables:
temperature (x, y, time) float64 25.86 20.82 6.954 23.13 10.25 11.68 ...
precipitation (x, y, time) float64 5.702 0.9422 2.075 1.178 3.284 ...
"""
selection = []
for var_name, variable in self.data_vars.items():
for attr_name, pattern in kwargs.items():
attr_value = variable.attrs.get(attr_name)
if ((callable(pattern) and pattern(attr_value)) or
attr_value == pattern):
selection.append(var_name)
return self[selection]
ops.inject_all_ops_and_reduce_methods(Dataset, array_only=False)
| {
"repo_name": "jcmgray/xarray",
"path": "xarray/core/dataset.py",
"copies": "1",
"size": "136388",
"license": "apache-2.0",
"hash": -8335074243642783000,
"line_mean": 37.7796417401,
"line_max": 92,
"alpha_frac": 0.563429334,
"autogenerated": false,
"ratio": 4.568806110143374,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5632235444143374,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import functools
import warnings
from distutils.version import LooseVersion
from io import BytesIO
import numpy as np
from .. import Variable
from ..core.indexing import NumpyIndexingAdapter
from ..core.pycompat import OrderedDict, basestring, iteritems
from ..core.utils import Frozen, FrozenOrderedDict
from .common import BackendArray, DataStorePickleMixin, WritableCFDataStore
from .netcdf3 import (
encode_nc3_attr_value, encode_nc3_variable, is_valid_nc3_name)
def _decode_string(s):
if isinstance(s, bytes):
return s.decode('utf-8', 'replace')
return s
def _decode_attrs(d):
# don't decode _FillValue from bytes -> unicode, because we want to ensure
# that its type matches the data exactly
return OrderedDict((k, v if k == '_FillValue' else _decode_string(v))
for (k, v) in iteritems(d))
class ScipyArrayWrapper(BackendArray):
def __init__(self, variable_name, datastore):
self.datastore = datastore
self.variable_name = variable_name
array = self.get_array()
self.shape = array.shape
self.dtype = np.dtype(array.dtype.kind +
str(array.dtype.itemsize))
def get_array(self):
self.datastore.assert_open()
return self.datastore.ds.variables[self.variable_name].data
def __getitem__(self, key):
with self.datastore.ensure_open(autoclose=True):
data = NumpyIndexingAdapter(self.get_array())[key]
# Copy data if the source file is mmapped.
# This makes things consistent
# with the netCDF4 library by ensuring
# we can safely read arrays even
# after closing associated files.
copy = self.datastore.ds.use_mmap
return np.array(data, dtype=self.dtype, copy=copy)
def __setitem__(self, key, value):
with self.datastore.ensure_open(autoclose=True):
data = self.datastore.ds.variables[self.variable_name]
try:
data[key] = value
except TypeError:
if key is Ellipsis:
# workaround for GH: scipy/scipy#6880
data[:] = value
else:
raise
def _open_scipy_netcdf(filename, mode, mmap, version):
import scipy.io
import gzip
# if the string ends with .gz, then gunzip and open as netcdf file
if isinstance(filename, basestring) and filename.endswith('.gz'):
try:
return scipy.io.netcdf_file(gzip.open(filename), mode=mode,
mmap=mmap, version=version)
except TypeError as e:
# TODO: gzipped loading only works with NetCDF3 files.
            if 'is not a valid NetCDF 3 file' in str(e):
raise ValueError('gzipped file loading only supports '
'NetCDF 3 files.')
else:
raise
if isinstance(filename, bytes) and filename.startswith(b'CDF'):
# it's a NetCDF3 bytestring
filename = BytesIO(filename)
try:
return scipy.io.netcdf_file(filename, mode=mode, mmap=mmap,
version=version)
except TypeError as e: # netcdf3 message is obscure in this case
errmsg = e.args[0]
if 'is not a valid NetCDF 3 file' in errmsg:
msg = """
If this is a NetCDF4 file, you may need to install the
netcdf4 library, e.g.,
$ pip install netcdf4
"""
errmsg += msg
raise TypeError(errmsg)
else:
raise
class ScipyDataStore(WritableCFDataStore, DataStorePickleMixin):
"""Store for reading and writing data via scipy.io.netcdf.
    This store has the advantage that it can be initialized with a
    StringIO object, allowing for serialization without writing to disk.
    It only supports the NetCDF3 file format.
"""
def __init__(self, filename_or_obj, mode='r', format=None, group=None,
writer=None, mmap=None, autoclose=False, lock=None):
import scipy
import scipy.io
if (mode != 'r' and
scipy.__version__ < LooseVersion('0.13')): # pragma: no cover
            warnings.warn('scipy %s detected; '
                          'the minimal recommended version is 0.13. '
                          'Older versions of this library do not reliably '
                          'read and write files.'
                          % scipy.__version__, ImportWarning)
if group is not None:
raise ValueError('cannot save to a group with the '
'scipy.io.netcdf backend')
if format is None or format == 'NETCDF3_64BIT':
version = 2
elif format == 'NETCDF3_CLASSIC':
version = 1
else:
raise ValueError('invalid format for scipy.io.netcdf backend: %r'
% format)
opener = functools.partial(_open_scipy_netcdf,
filename=filename_or_obj,
mode=mode, mmap=mmap, version=version)
self._ds = opener()
self._autoclose = autoclose
self._isopen = True
self._opener = opener
self._mode = mode
super(ScipyDataStore, self).__init__(writer, lock=lock)
def open_store_variable(self, name, var):
with self.ensure_open(autoclose=False):
return Variable(var.dimensions, ScipyArrayWrapper(name, self),
_decode_attrs(var._attributes))
def get_variables(self):
with self.ensure_open(autoclose=False):
return FrozenOrderedDict((k, self.open_store_variable(k, v))
for k, v in iteritems(self.ds.variables))
def get_attrs(self):
with self.ensure_open(autoclose=True):
return Frozen(_decode_attrs(self.ds._attributes))
def get_dimensions(self):
with self.ensure_open(autoclose=True):
return Frozen(self.ds.dimensions)
def get_encoding(self):
encoding = {}
encoding['unlimited_dims'] = {
k for k, v in self.ds.dimensions.items() if v is None}
return encoding
def set_dimension(self, name, length, is_unlimited=False):
with self.ensure_open(autoclose=False):
if name in self.ds.dimensions:
raise ValueError('%s does not support modifying dimensions'
% type(self).__name__)
dim_length = length if not is_unlimited else None
self.ds.createDimension(name, dim_length)
def _validate_attr_key(self, key):
if not is_valid_nc3_name(key):
raise ValueError("Not a valid attribute name")
def set_attribute(self, key, value):
with self.ensure_open(autoclose=False):
self._validate_attr_key(key)
value = encode_nc3_attr_value(value)
setattr(self.ds, key, value)
def encode_variable(self, variable):
variable = encode_nc3_variable(variable)
return variable
def prepare_variable(self, name, variable, check_encoding=False,
unlimited_dims=None):
if check_encoding and variable.encoding:
if variable.encoding != {'_FillValue': None}:
raise ValueError('unexpected encoding for scipy backend: %r'
% list(variable.encoding))
data = variable.data
        # nb. this still creates a numpy array fully in memory, even though
        # we don't write the data yet; scipy.io.netcdf does not support
        # incremental writes.
if name not in self.ds.variables:
self.ds.createVariable(name, data.dtype, variable.dims)
scipy_var = self.ds.variables[name]
for k, v in iteritems(variable.attrs):
self._validate_attr_key(k)
setattr(scipy_var, k, v)
target = ScipyArrayWrapper(name, self)
return target, data
def sync(self, compute=True):
if not compute:
raise NotImplementedError(
'compute=False is not supported for the scipy backend yet')
with self.ensure_open(autoclose=True):
super(ScipyDataStore, self).sync(compute=compute)
self.ds.flush()
def close(self):
self.ds.close()
self._isopen = False
def __exit__(self, type, value, tb):
self.close()
def __setstate__(self, state):
filename = state['_opener'].keywords['filename']
if hasattr(filename, 'seek'):
# it's a file-like object
# seek to the start of the file so scipy can read it
filename.seek(0)
super(ScipyDataStore, self).__setstate__(state)
self._ds = None
self._isopen = False
| {
"repo_name": "jcmgray/xarray",
"path": "xarray/backends/scipy_.py",
"copies": "1",
"size": "9010",
"license": "apache-2.0",
"hash": -4058955448310679000,
"line_mean": 35.7755102041,
"line_max": 78,
"alpha_frac": 0.584017758,
"autogenerated": false,
"ratio": 4.2884340790099955,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5372451837009995,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import functools
import warnings
import numpy as np
import pandas as pd
from . import computation, groupby, indexing, ops, resample, rolling, utils
from ..plot.plot import _PlotMethods
from .accessors import DatetimeAccessor
from .alignment import align, reindex_like_indexers
from .common import AbstractArray, DataWithCoords
from .coordinates import (
DataArrayCoordinates, Indexes, LevelCoordinatesSource,
assert_coordinate_consistent, remap_label_indexers)
from .dataset import Dataset, merge_indexes, split_indexes
from .formatting import format_item
from .options import OPTIONS
from .pycompat import OrderedDict, basestring, iteritems, range, zip
from .utils import decode_numpy_dict_values, ensure_us_time_resolution
from .variable import (
IndexVariable, Variable, as_compatible_data, as_variable,
assert_unique_multiindex_level_names)
def _infer_coords_and_dims(shape, coords, dims):
"""All the logic for creating a new DataArray"""
if (coords is not None and not utils.is_dict_like(coords) and
len(coords) != len(shape)):
raise ValueError('coords is not dict-like, but it has %s items, '
'which does not match the %s dimensions of the '
'data' % (len(coords), len(shape)))
if isinstance(dims, basestring):
dims = (dims,)
if dims is None:
dims = ['dim_%s' % n for n in range(len(shape))]
if coords is not None and len(coords) == len(shape):
# try to infer dimensions from coords
if utils.is_dict_like(coords):
# deprecated in GH993, removed in GH1539
raise ValueError('inferring DataArray dimensions from '
'dictionary like ``coords`` is no longer '
'supported. Use an explicit list of '
'``dims`` instead.')
for n, (dim, coord) in enumerate(zip(dims, coords)):
coord = as_variable(coord,
name=dims[n]).to_index_variable()
dims[n] = coord.name
dims = tuple(dims)
else:
for d in dims:
if not isinstance(d, basestring):
raise TypeError('dimension %s is not a string' % d)
new_coords = OrderedDict()
if utils.is_dict_like(coords):
for k, v in coords.items():
new_coords[k] = as_variable(v, name=k)
elif coords is not None:
for dim, coord in zip(dims, coords):
var = as_variable(coord, name=dim)
var.dims = (dim,)
new_coords[dim] = var
sizes = dict(zip(dims, shape))
for k, v in new_coords.items():
if any(d not in dims for d in v.dims):
raise ValueError('coordinate %s has dimensions %s, but these '
'are not a subset of the DataArray '
'dimensions %s' % (k, v.dims, dims))
for d, s in zip(v.dims, v.shape):
if s != sizes[d]:
raise ValueError('conflicting sizes for dimension %r: '
'length %s on the data but length %s on '
'coordinate %r' % (d, sizes[d], s, k))
if k in sizes and v.shape != (sizes[k],):
raise ValueError('coordinate %r is a DataArray dimension, but '
'it has shape %r rather than expected shape %r '
'matching the dimension size'
% (k, v.shape, (sizes[k],)))
assert_unique_multiindex_level_names(new_coords)
return new_coords, dims
class _LocIndexer(object):
def __init__(self, data_array):
self.data_array = data_array
def __getitem__(self, key):
if not utils.is_dict_like(key):
# expand the indexer so we can handle Ellipsis
labels = indexing.expanded_indexer(key, self.data_array.ndim)
key = dict(zip(self.data_array.dims, labels))
return self.data_array.sel(**key)
def __setitem__(self, key, value):
if not utils.is_dict_like(key):
# expand the indexer so we can handle Ellipsis
labels = indexing.expanded_indexer(key, self.data_array.ndim)
key = dict(zip(self.data_array.dims, labels))
pos_indexers, _ = remap_label_indexers(self.data_array, **key)
self.data_array[pos_indexers] = value
# Used as the key corresponding to a DataArray's variable when converting
# arbitrary DataArray objects to datasets
_THIS_ARRAY = utils.ReprObject('<this-array>')
class DataArray(AbstractArray, DataWithCoords):
"""N-dimensional array with labeled coordinates and dimensions.
DataArray provides a wrapper around numpy ndarrays that uses labeled
dimensions and coordinates to support metadata aware operations. The API is
similar to that for the pandas Series or DataFrame, but DataArray objects
can have any number of dimensions, and their contents have fixed data
types.
Additional features over raw numpy arrays:
- Apply operations over dimensions by name: ``x.sum('time')``.
- Select or assign values by integer location (like numpy): ``x[:10]``
or by label (like pandas): ``x.loc['2014-01-01']`` or
``x.sel(time='2014-01-01')``.
- Mathematical operations (e.g., ``x - y``) vectorize across multiple
dimensions (known in numpy as "broadcasting") based on dimension names,
regardless of their original order.
- Keep track of arbitrary metadata in the form of a Python dictionary:
``x.attrs``
- Convert to a pandas Series: ``x.to_series()``.
Getting items from or doing mathematical operations with a DataArray
always returns another DataArray.
Attributes
----------
dims : tuple
Dimension names associated with this array.
values : np.ndarray
Access or modify DataArray values as a numpy array.
coords : dict-like
Dictionary of DataArray objects that label values along each dimension.
name : str or None
Name of this array.
attrs : OrderedDict
Dictionary for holding arbitrary metadata.
"""
_groupby_cls = groupby.DataArrayGroupBy
_rolling_cls = rolling.DataArrayRolling
_resample_cls = resample.DataArrayResample
dt = property(DatetimeAccessor)
def __init__(self, data, coords=None, dims=None, name=None,
attrs=None, encoding=None, fastpath=False):
"""
Parameters
----------
data : array_like
            Values for this array. Must be a ``numpy.ndarray``, ndarray-like,
or castable to an ``ndarray``. If a self-described xarray or pandas
object, attempts are made to use this array's metadata to fill in
other unspecified arguments. A view of the array's data is used
instead of a copy if possible.
coords : sequence or dict of array_like objects, optional
Coordinates (tick labels) to use for indexing along each dimension.
If dict-like, should be a mapping from dimension names to the
corresponding coordinates. If sequence-like, should be a sequence
of tuples where the first element is the dimension name and the
second element is the corresponding coordinate array_like object.
dims : str or sequence of str, optional
Name(s) of the data dimension(s). Must be either a string (only
for 1D data) or a sequence of strings with length equal to the
number of dimensions. If this argument is omitted, dimension names
are taken from ``coords`` (if possible) and otherwise default to
``['dim_0', ... 'dim_n']``.
name : str or None, optional
Name of this array.
attrs : dict_like or None, optional
Attributes to assign to the new instance. By default, an empty
attribute dictionary is initialized.
encoding : dict_like or None, optional
Dictionary specifying how to encode this array's data into a
serialized format like netCDF4. Currently used keys (for netCDF)
include '_FillValue', 'scale_factor', 'add_offset', 'dtype',
            'units' and 'calendar' (the latter two only for datetime arrays).
Unrecognized keys are ignored.
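        Examples
        --------
        A minimal construction sketch (assumes ``import numpy as np`` and
        ``import xarray as xr``; repr output omitted):
        >>> da = xr.DataArray(np.arange(6).reshape(2, 3),
        ...                   coords={'x': ['a', 'b'], 'y': [10, 20, 30]},
        ...                   dims=('x', 'y'), name='example',
        ...                   attrs={'units': 'm'})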
"""
if fastpath:
variable = data
assert dims is None
assert attrs is None
assert encoding is None
else:
# try to fill in arguments from data if they weren't supplied
if coords is None:
coords = getattr(data, 'coords', None)
if isinstance(data, pd.Series):
coords = [data.index]
elif isinstance(data, pd.DataFrame):
coords = [data.index, data.columns]
elif isinstance(data, (pd.Index, IndexVariable)):
coords = [data]
elif isinstance(data, pd.Panel):
coords = [data.items, data.major_axis, data.minor_axis]
if dims is None:
dims = getattr(data, 'dims', getattr(coords, 'dims', None))
if name is None:
name = getattr(data, 'name', None)
if attrs is None:
attrs = getattr(data, 'attrs', None)
if encoding is None:
encoding = getattr(data, 'encoding', None)
data = as_compatible_data(data)
coords, dims = _infer_coords_and_dims(data.shape, coords, dims)
variable = Variable(dims, data, attrs, encoding, fastpath=True)
# uncomment for a useful consistency check:
# assert all(isinstance(v, Variable) for v in coords.values())
# These fully describe a DataArray
self._variable = variable
self._coords = coords
self._name = name
self._file_obj = None
self._initialized = True
__default = object()
def _replace(self, variable=None, coords=None, name=__default):
if variable is None:
variable = self.variable
if coords is None:
coords = self._coords
if name is self.__default:
name = self.name
return type(self)(variable, coords, name=name, fastpath=True)
def _replace_maybe_drop_dims(self, variable, name=__default):
if variable.dims == self.dims:
coords = None
else:
allowed_dims = set(variable.dims)
coords = OrderedDict((k, v) for k, v in self._coords.items()
if set(v.dims) <= allowed_dims)
return self._replace(variable, coords, name)
def _replace_indexes(self, indexes):
if not len(indexes):
return self
coords = self._coords.copy()
for name, idx in indexes.items():
coords[name] = IndexVariable(name, idx)
obj = self._replace(coords=coords)
# switch from dimension to level names, if necessary
dim_names = {}
for dim, idx in indexes.items():
if not isinstance(idx, pd.MultiIndex) and idx.name != dim:
dim_names[dim] = idx.name
if dim_names:
obj = obj.rename(dim_names)
return obj
def _to_temp_dataset(self):
return self._to_dataset_whole(name=_THIS_ARRAY,
shallow_copy=False)
def _from_temp_dataset(self, dataset, name=__default):
variable = dataset._variables.pop(_THIS_ARRAY)
coords = dataset._variables
return self._replace(variable, coords, name)
def _to_dataset_split(self, dim):
def subset(dim, label):
array = self.loc[{dim: label}]
if dim in array.coords:
del array.coords[dim]
array.attrs = {}
return array
variables = OrderedDict([(label, subset(dim, label))
for label in self.get_index(dim)])
coords = self.coords.to_dataset()
if dim in coords:
del coords[dim]
return Dataset(variables, coords, self.attrs)
def _to_dataset_whole(self, name=None, shallow_copy=True):
if name is None:
name = self.name
if name is None:
raise ValueError('unable to convert unnamed DataArray to a '
'Dataset without providing an explicit name')
if name in self.coords:
raise ValueError('cannot create a Dataset from a DataArray with '
'the same name as one of its coordinates')
# use private APIs for speed: this is called by _to_temp_dataset(),
# which is used in the guts of a lot of operations (e.g., reindex)
variables = self._coords.copy()
variables[name] = self.variable
if shallow_copy:
for k in variables:
variables[k] = variables[k].copy(deep=False)
coord_names = set(self._coords)
dataset = Dataset._from_vars_and_coord_names(variables, coord_names)
return dataset
def to_dataset(self, dim=None, name=None):
"""Convert a DataArray to a Dataset.
Parameters
----------
dim : str, optional
Name of the dimension on this array along which to split this array
into separate variables. If not provided, this array is converted
into a Dataset of one variable.
name : str, optional
Name to substitute for this array's name. Only valid if ``dim`` is
not provided.
Returns
-------
dataset : Dataset
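        Examples
        --------
        Illustrative sketch (repr output omitted):
        >>> da = xr.DataArray([1, 2, 3], dims='x', name='foo')
        >>> ds = da.to_dataset()             # Dataset with one variable 'foo'
        >>> ds2 = da.to_dataset(name='bar')  # substitute a different name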
"""
if dim is not None and dim not in self.dims:
warnings.warn('the order of the arguments on DataArray.to_dataset '
'has changed; you now need to supply ``name`` as '
'a keyword argument',
FutureWarning, stacklevel=2)
name = dim
dim = None
if dim is not None:
if name is not None:
raise TypeError('cannot supply both dim and name arguments')
return self._to_dataset_split(dim)
else:
return self._to_dataset_whole(name)
@property
def name(self):
"""The name of this array.
"""
return self._name
@name.setter
def name(self, value):
self._name = value
@property
def variable(self):
"""Low level interface to the Variable object for this DataArray."""
return self._variable
@property
def dtype(self):
return self.variable.dtype
@property
def shape(self):
return self.variable.shape
@property
def size(self):
return self.variable.size
@property
def nbytes(self):
return self.variable.nbytes
@property
def ndim(self):
return self.variable.ndim
def __len__(self):
return len(self.variable)
@property
def data(self):
"""The array's data as a dask or numpy array"""
return self.variable.data
@data.setter
def data(self, value):
self.variable.data = value
@property
def values(self):
"""The array's data as a numpy.ndarray"""
return self.variable.values
@values.setter
def values(self, value):
self.variable.values = value
@property
def _in_memory(self):
return self.variable._in_memory
def to_index(self):
"""Convert this variable to a pandas.Index. Only possible for 1D
arrays.
"""
return self.variable.to_index()
@property
def dims(self):
"""Tuple of dimension names associated with this array.
Note that the type of this property is inconsistent with
`Dataset.dims`. See `Dataset.sizes` and `DataArray.sizes` for
consistently named properties.
"""
return self.variable.dims
@dims.setter
def dims(self, value):
raise AttributeError('you cannot assign dims on a DataArray. Use '
'.rename() or .swap_dims() instead.')
def _item_key_to_dict(self, key):
if utils.is_dict_like(key):
return key
else:
key = indexing.expanded_indexer(key, self.ndim)
return dict(zip(self.dims, key))
@property
def _level_coords(self):
"""Return a mapping of all MultiIndex levels and their corresponding
coordinate name.
"""
level_coords = OrderedDict()
for cname, var in self._coords.items():
if var.ndim == 1 and isinstance(var, IndexVariable):
level_names = var.level_names
if level_names is not None:
dim, = var.dims
level_coords.update({lname: dim for lname in level_names})
return level_coords
def _getitem_coord(self, key):
from .dataset import _get_virtual_variable
try:
var = self._coords[key]
except KeyError:
dim_sizes = dict(zip(self.dims, self.shape))
_, key, var = _get_virtual_variable(
self._coords, key, self._level_coords, dim_sizes)
return self._replace_maybe_drop_dims(var, name=key)
def __getitem__(self, key):
if isinstance(key, basestring):
return self._getitem_coord(key)
else:
# xarray-style array indexing
return self.isel(**self._item_key_to_dict(key))
def __setitem__(self, key, value):
if isinstance(key, basestring):
self.coords[key] = value
else:
# Coordinates in key, value and self[key] should be consistent.
# TODO Coordinate consistency in key is checked here, but it
# causes unnecessary indexing. It should be optimized.
obj = self[key]
if isinstance(value, DataArray):
assert_coordinate_consistent(value, obj.coords.variables)
# DataArray key -> Variable key
key = {k: v.variable if isinstance(v, DataArray) else v
for k, v in self._item_key_to_dict(key).items()}
self.variable[key] = value
def __delitem__(self, key):
del self.coords[key]
@property
def _attr_sources(self):
"""List of places to look-up items for attribute-style access"""
return self._item_sources + [self.attrs]
@property
def _item_sources(self):
"""List of places to look-up items for key-completion"""
return [self.coords, {d: self[d] for d in self.dims},
LevelCoordinatesSource(self)]
def __contains__(self, key):
warnings.warn(
'xarray.DataArray.__contains__ currently checks membership in '
'DataArray.coords, but in xarray v0.11 will change to check '
'membership in array values.', FutureWarning, stacklevel=2)
return key in self._coords
@property
def loc(self):
"""Attribute for location based indexing like pandas.
"""
return _LocIndexer(self)
@property
def attrs(self):
"""Dictionary storing arbitrary metadata with this array."""
return self.variable.attrs
@attrs.setter
def attrs(self, value):
self.variable.attrs = value
@property
def encoding(self):
"""Dictionary of format-specific settings for how this array should be
serialized."""
return self.variable.encoding
@encoding.setter
def encoding(self, value):
self.variable.encoding = value
@property
def indexes(self):
"""OrderedDict of pandas.Index objects used for label based indexing
"""
return Indexes(self._coords, self.sizes)
@property
def coords(self):
"""Dictionary-like container of coordinate arrays.
"""
return DataArrayCoordinates(self)
def reset_coords(self, names=None, drop=False, inplace=False):
"""Given names of coordinates, reset them to become variables.
Parameters
----------
names : str or list of str, optional
Name(s) of non-index coordinates in this dataset to reset into
variables. By default, all non-index coordinates are reset.
drop : bool, optional
If True, remove coordinates instead of converting them into
variables.
inplace : bool, optional
If True, modify this dataset inplace. Otherwise, create a new
object.
Returns
-------
Dataset, or DataArray if ``drop == True``
"""
if inplace and not drop:
raise ValueError('cannot reset coordinates in-place on a '
'DataArray without ``drop == True``')
if names is None:
names = set(self.coords) - set(self.dims)
dataset = self.coords.to_dataset().reset_coords(names, drop)
if drop:
if inplace:
self._coords = dataset._variables
else:
return self._replace(coords=dataset._variables)
else:
if self.name is None:
raise ValueError('cannot reset_coords with drop=False '
                                 'on an unnamed DataArray')
dataset[self.name] = self.variable
return dataset
def __dask_graph__(self):
return self._to_temp_dataset().__dask_graph__()
def __dask_keys__(self):
return self._to_temp_dataset().__dask_keys__()
@property
def __dask_optimize__(self):
return self._to_temp_dataset().__dask_optimize__
@property
def __dask_scheduler__(self):
return self._to_temp_dataset().__dask_scheduler__
def __dask_postcompute__(self):
func, args = self._to_temp_dataset().__dask_postcompute__()
return self._dask_finalize, (func, args, self.name)
def __dask_postpersist__(self):
func, args = self._to_temp_dataset().__dask_postpersist__()
return self._dask_finalize, (func, args, self.name)
@staticmethod
def _dask_finalize(results, func, args, name):
ds = func(results, *args)
variable = ds._variables.pop(_THIS_ARRAY)
coords = ds._variables
return DataArray(variable, coords, name=name, fastpath=True)
def load(self, **kwargs):
"""Manually trigger loading of this array's data from disk or a
remote source into memory and return this array.
Normally, it should not be necessary to call this method in user code,
because all xarray functions should either work on deferred data or
load data automatically. However, this method can be necessary when
working with many file objects on disk.
Parameters
----------
**kwargs : dict
Additional keyword arguments passed on to ``dask.array.compute``.
See Also
--------
dask.array.compute
"""
ds = self._to_temp_dataset().load(**kwargs)
new = self._from_temp_dataset(ds)
self._variable = new._variable
self._coords = new._coords
return self
def compute(self, **kwargs):
"""Manually trigger loading of this array's data from disk or a
remote source into memory and return a new array. The original is
left unaltered.
Normally, it should not be necessary to call this method in user code,
because all xarray functions should either work on deferred data or
load data automatically. However, this method can be necessary when
working with many file objects on disk.
Parameters
----------
**kwargs : dict
Additional keyword arguments passed on to ``dask.array.compute``.
See Also
--------
dask.array.compute
"""
new = self.copy(deep=False)
return new.load(**kwargs)
def persist(self, **kwargs):
""" Trigger computation in constituent dask arrays
This keeps them as dask arrays but encourages them to keep data in
memory. This is particularly useful when on a distributed machine.
When on a single machine consider using ``.compute()`` instead.
Parameters
----------
**kwargs : dict
Additional keyword arguments passed on to ``dask.persist``.
See Also
--------
dask.persist
"""
ds = self._to_temp_dataset().persist(**kwargs)
return self._from_temp_dataset(ds)
def copy(self, deep=True):
"""Returns a copy of this array.
If `deep=True`, a deep copy is made of all variables in the underlying
dataset. Otherwise, a shallow copy is made, so each variable in the new
array's dataset is also a variable in this array's dataset.
"""
variable = self.variable.copy(deep=deep)
coords = OrderedDict((k, v.copy(deep=deep))
for k, v in self._coords.items())
return self._replace(variable, coords)
def __copy__(self):
return self.copy(deep=False)
def __deepcopy__(self, memo=None):
# memo does nothing but is required for compatibility with
# copy.deepcopy
return self.copy(deep=True)
# mutable objects should not be hashable
__hash__ = None
@property
def chunks(self):
"""Block dimensions for this array's data or None if it's not a dask
array.
"""
return self.variable.chunks
def chunk(self, chunks=None, name_prefix='xarray-', token=None,
lock=False):
"""Coerce this array's data into a dask arrays with the given chunks.
If this variable is a non-dask array, it will be converted to dask
array. If it's a dask array, it will be rechunked to the given chunk
sizes.
If neither chunks is not provided for one or more dimensions, chunk
sizes along that dimension will not be updated; non-dask arrays will be
converted into dask arrays with a single block.
Parameters
----------
chunks : int, tuple or dict, optional
Chunk sizes along each dimension, e.g., ``5``, ``(5, 5)`` or
``{'x': 5, 'y': 5}``.
name_prefix : str, optional
Prefix for the name of the new dask array.
token : str, optional
Token uniquely identifying this array.
lock : optional
            Passed on to :py:func:`dask.array.from_array`, if the array is
            not already a dask array.
Returns
-------
chunked : xarray.DataArray
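        Examples
        --------
        Illustrative sketch (requires the optional ``dask`` dependency; repr
        output omitted):
        >>> da = xr.DataArray(np.zeros((100, 100)), dims=('x', 'y'))
        >>> chunked = da.chunk({'x': 10})        # dask-backed, blocks of 10 along 'x'
        >>> rechunked = chunked.chunk((20, 50))  # positional chunks per dimension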
"""
if isinstance(chunks, (list, tuple)):
chunks = dict(zip(self.dims, chunks))
ds = self._to_temp_dataset().chunk(chunks, name_prefix=name_prefix,
token=token, lock=lock)
return self._from_temp_dataset(ds)
def isel(self, drop=False, **indexers):
"""Return a new DataArray whose dataset is given by integer indexing
along the specified dimension(s).
See Also
--------
Dataset.isel
DataArray.sel
"""
ds = self._to_temp_dataset().isel(drop=drop, **indexers)
return self._from_temp_dataset(ds)
def sel(self, method=None, tolerance=None, drop=False, **indexers):
"""Return a new DataArray whose dataset is given by selecting
index labels along the specified dimension(s).
.. warning::
Do not try to assign values when using any of the indexing methods
``isel`` or ``sel``::
da = xr.DataArray([0, 1, 2, 3], dims=['x'])
# DO NOT do this
da.isel(x=[0, 1, 2])[1] = -1
            Assigning values with chained indexing using ``.sel`` or
            ``.isel`` fails silently.
See Also
--------
Dataset.sel
DataArray.isel
"""
ds = self._to_temp_dataset().sel(drop=drop, method=method,
tolerance=tolerance, **indexers)
return self._from_temp_dataset(ds)
def isel_points(self, dim='points', **indexers):
"""Return a new DataArray whose dataset is given by pointwise integer
indexing along the specified dimension(s).
See Also
--------
Dataset.isel_points
"""
ds = self._to_temp_dataset().isel_points(dim=dim, **indexers)
return self._from_temp_dataset(ds)
def sel_points(self, dim='points', method=None, tolerance=None,
**indexers):
"""Return a new DataArray whose dataset is given by pointwise selection
of index labels along the specified dimension(s).
See Also
--------
Dataset.sel_points
"""
ds = self._to_temp_dataset().sel_points(
dim=dim, method=method, tolerance=tolerance, **indexers)
return self._from_temp_dataset(ds)
def reindex_like(self, other, method=None, tolerance=None, copy=True):
"""Conform this object onto the indexes of another object, filling
in missing values with NaN.
Parameters
----------
other : Dataset or DataArray
Object with an 'indexes' attribute giving a mapping from dimension
names to pandas.Index objects, which provides coordinates upon
which to index the variables in this dataset. The indexes on this
other object need not be the same as the indexes on this
dataset. Any mis-matched index values will be filled in with
NaN, and any mis-matched dimension names will simply be ignored.
method : {None, 'nearest', 'pad'/'ffill', 'backfill'/'bfill'}, optional
Method to use for filling index values from other not found on this
data array:
* None (default): don't fill gaps
* pad / ffill: propagate last valid index value forward
* backfill / bfill: propagate next valid index value backward
* nearest: use nearest valid index value (requires pandas>=0.16)
tolerance : optional
Maximum distance between original and new labels for inexact
            matches. The values of the index at the matching locations must
            satisfy the equation ``abs(index[indexer] - target) <= tolerance``.
Requires pandas>=0.17.
copy : bool, optional
If ``copy=True``, data in the return value is always copied. If
``copy=False`` and reindexing is unnecessary, or can be performed
with only slice operations, then the output may share memory with
the input. In either case, a new xarray object is always returned.
Returns
-------
reindexed : DataArray
Another dataset array, with this array's data but coordinates from
the other object.
See Also
--------
DataArray.reindex
align
"""
indexers = reindex_like_indexers(self, other)
return self.reindex(method=method, tolerance=tolerance, copy=copy,
**indexers)
def reindex(self, method=None, tolerance=None, copy=True, **indexers):
"""Conform this object onto a new set of indexes, filling in
missing values with NaN.
Parameters
----------
copy : bool, optional
If ``copy=True``, data in the return value is always copied. If
``copy=False`` and reindexing is unnecessary, or can be performed
with only slice operations, then the output may share memory with
the input. In either case, a new xarray object is always returned.
method : {None, 'nearest', 'pad'/'ffill', 'backfill'/'bfill'}, optional
Method to use for filling index values in ``indexers`` not found on
this data array:
* None (default): don't fill gaps
* pad / ffill: propagate last valid index value forward
* backfill / bfill: propagate next valid index value backward
* nearest: use nearest valid index value (requires pandas>=0.16)
tolerance : optional
Maximum distance between original and new labels for inexact
            matches. The values of the index at the matching locations must
            satisfy the equation ``abs(index[indexer] - target) <= tolerance``.
**indexers : dict
Dictionary with keys given by dimension names and values given by
arrays of coordinates tick labels. Any mis-matched coordinate
values will be filled in with NaN, and any mis-matched dimension
names will simply be ignored.
Returns
-------
reindexed : DataArray
Another dataset array, with this array's data but replaced
coordinates.
See Also
--------
DataArray.reindex_like
align
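        Examples
        --------
        Illustrative sketch (missing labels are filled with NaN; repr output
        omitted):
        >>> da = xr.DataArray([1, 2, 3], coords=[('x', [0, 1, 2])])
        >>> reordered = da.reindex(x=[2, 1, 0])
        >>> padded = da.reindex(x=[0, 1, 2, 3])             # label 3 -> NaN
        >>> filled = da.reindex(x=[0, 1, 2, 3], method='ffill')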
"""
ds = self._to_temp_dataset().reindex(
method=method, tolerance=tolerance, copy=copy, **indexers)
return self._from_temp_dataset(ds)
def rename(self, new_name_or_name_dict):
"""Returns a new DataArray with renamed coordinates or a new name.
Parameters
----------
new_name_or_name_dict : str or dict-like
            If the argument is dict-like, it is used as a mapping from old
names to new names for coordinates. Otherwise, use the argument
as the new name for this array.
Returns
-------
renamed : DataArray
Renamed array or array with renamed coordinates.
See Also
--------
Dataset.rename
DataArray.swap_dims
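        Examples
        --------
        Illustrative sketch (repr output omitted):
        >>> da = xr.DataArray([1, 2, 3], coords=[('x', [10, 20, 30])],
        ...                   name='foo')
        >>> renamed = da.rename('bar')            # new name for the array itself
        >>> relabeled = da.rename({'x': 'time'})  # rename the 'x' coordinate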
"""
if utils.is_dict_like(new_name_or_name_dict):
dataset = self._to_temp_dataset().rename(new_name_or_name_dict)
return self._from_temp_dataset(dataset)
else:
return self._replace(name=new_name_or_name_dict)
def swap_dims(self, dims_dict):
"""Returns a new DataArray with swapped dimensions.
Parameters
----------
dims_dict : dict-like
Dictionary whose keys are current dimension names and whose values
are new names. Each value must already be a coordinate on this
array.
Returns
-------
renamed : Dataset
DataArray with swapped dimensions.
See Also
--------
DataArray.rename
Dataset.swap_dims
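        Examples
        --------
        Illustrative sketch (repr output omitted):
        >>> da = xr.DataArray([1, 2, 3], dims='x',
        ...                   coords={'x': [0, 1, 2],
        ...                           'y': ('x', ['a', 'b', 'c'])})
        >>> swapped = da.swap_dims({'x': 'y'})   # 'y' becomes the dimension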
"""
ds = self._to_temp_dataset().swap_dims(dims_dict)
return self._from_temp_dataset(ds)
def expand_dims(self, dim, axis=None):
"""Return a new object with an additional axis (or axes) inserted at
the corresponding position in the array shape.
If dim is already a scalar coordinate, it will be promoted to a 1D
coordinate consisting of a single value.
Parameters
----------
        dim : str or sequence of str
            Dimensions to include on the new variable. New dimensions are
            inserted with length 1.
axis : integer, list (or tuple) of integers, or None
Axis position(s) where new axis is to be inserted (position(s) on
the result array). If a list (or tuple) of integers is passed,
            multiple axes are inserted. In this case, ``dim`` should be a
            list of the same length. If axis=None is passed, all the axes
            will be inserted at the start of the result array.
Returns
-------
expanded : same type as caller
This object, but with an additional dimension(s).
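        Examples
        --------
        Illustrative sketch (repr output omitted):
        >>> da = xr.DataArray([1, 2, 3], dims='x')
        >>> front = da.expand_dims('time')          # shape becomes (1, 3)
        >>> back = da.expand_dims('time', axis=1)   # shape becomes (3, 1)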
"""
ds = self._to_temp_dataset().expand_dims(dim, axis)
return self._from_temp_dataset(ds)
def set_index(self, append=False, inplace=False, **indexes):
"""Set DataArray (multi-)indexes using one or more existing
coordinates.
Parameters
----------
append : bool, optional
If True, append the supplied index(es) to the existing index(es).
Otherwise replace the existing index(es) (default).
inplace : bool, optional
If True, set new index(es) in-place. Otherwise, return a new
DataArray object.
**indexes : {dim: index, ...}
Keyword arguments with names matching dimensions and values given
by (lists of) the names of existing coordinates or variables to set
as new (multi-)index.
Returns
-------
obj : DataArray
Another dataarray, with this data but replaced coordinates.
See Also
--------
DataArray.reset_index
"""
coords, _ = merge_indexes(indexes, self._coords, set(), append=append)
if inplace:
self._coords = coords
else:
return self._replace(coords=coords)
def reset_index(self, dims_or_levels, drop=False, inplace=False):
"""Reset the specified index(es) or multi-index level(s).
Parameters
----------
dims_or_levels : str or list
Name(s) of the dimension(s) and/or multi-index level(s) that will
be reset.
drop : bool, optional
If True, remove the specified indexes and/or multi-index levels
instead of extracting them as new coordinates (default: False).
inplace : bool, optional
If True, modify the dataarray in-place. Otherwise, return a new
DataArray object.
Returns
-------
obj : DataArray
Another dataarray, with this dataarray's data but replaced
coordinates.
See Also
--------
DataArray.set_index
"""
coords, _ = split_indexes(dims_or_levels, self._coords, set(),
self._level_coords, drop=drop)
if inplace:
self._coords = coords
else:
return self._replace(coords=coords)
def reorder_levels(self, inplace=False, **dim_order):
"""Rearrange index levels using input order.
Parameters
----------
inplace : bool, optional
If True, modify the dataarray in-place. Otherwise, return a new
DataArray object.
**dim_order : optional
Keyword arguments with names matching dimensions and values given
by lists representing new level orders. Every given dimension
must have a multi-index.
Returns
-------
obj : DataArray
Another dataarray, with this dataarray's data but replaced
coordinates.
"""
replace_coords = {}
for dim, order in dim_order.items():
coord = self._coords[dim]
index = coord.to_index()
if not isinstance(index, pd.MultiIndex):
raise ValueError("coordinate %r has no MultiIndex" % dim)
replace_coords[dim] = IndexVariable(coord.dims,
index.reorder_levels(order))
coords = self._coords.copy()
coords.update(replace_coords)
if inplace:
self._coords = coords
else:
return self._replace(coords=coords)
def stack(self, **dimensions):
"""
Stack any number of existing dimensions into a single new dimension.
New dimensions will be added at the end, and the corresponding
coordinate variables will be combined into a MultiIndex.
Parameters
----------
**dimensions : keyword arguments of the form new_name=(dim1, dim2, ...)
Names of new dimensions, and the existing dimensions that they
replace.
Returns
-------
stacked : DataArray
DataArray with stacked data.
Examples
--------
>>> arr = DataArray(np.arange(6).reshape(2, 3),
... coords=[('x', ['a', 'b']), ('y', [0, 1, 2])])
>>> arr
<xarray.DataArray (x: 2, y: 3)>
array([[0, 1, 2],
[3, 4, 5]])
Coordinates:
* x (x) |S1 'a' 'b'
* y (y) int64 0 1 2
>>> stacked = arr.stack(z=('x', 'y'))
>>> stacked.indexes['z']
MultiIndex(levels=[[u'a', u'b'], [0, 1, 2]],
labels=[[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 1, 2]],
names=[u'x', u'y'])
See also
--------
DataArray.unstack
"""
ds = self._to_temp_dataset().stack(**dimensions)
return self._from_temp_dataset(ds)
def unstack(self, dim):
"""
Unstack an existing dimension corresponding to a MultiIndex into
multiple new dimensions.
New dimensions will be added at the end.
Parameters
----------
dim : str
Name of the existing dimension to unstack.
Returns
-------
unstacked : DataArray
Array with unstacked data.
See also
--------
DataArray.stack
"""
ds = self._to_temp_dataset().unstack(dim)
return self._from_temp_dataset(ds)
def transpose(self, *dims):
"""Return a new DataArray object with transposed dimensions.
Parameters
----------
*dims : str, optional
By default, reverse the dimensions. Otherwise, reorder the
dimensions to this order.
Returns
-------
transposed : DataArray
The returned DataArray's array is transposed.
Notes
-----
Although this operation returns a view of this array's data, it is
not lazy -- the data will be fully loaded.
See Also
--------
numpy.transpose
Dataset.transpose
"""
variable = self.variable.transpose(*dims)
return self._replace(variable)
def drop(self, labels, dim=None):
"""Drop coordinates or index labels from this DataArray.
Parameters
----------
labels : scalar or list of scalars
Name(s) of coordinate variables or index labels to drop.
dim : str, optional
Dimension along which to drop index labels. By default (if
``dim is None``), drops coordinates rather than index labels.
Returns
-------
dropped : DataArray
"""
if utils.is_scalar(labels):
labels = [labels]
ds = self._to_temp_dataset().drop(labels, dim)
return self._from_temp_dataset(ds)
def dropna(self, dim, how='any', thresh=None):
"""Returns a new array with dropped labels for missing values along
the provided dimension.
Parameters
----------
dim : str
Dimension along which to drop missing values. Dropping along
multiple dimensions simultaneously is not yet supported.
how : {'any', 'all'}, optional
* any : if any NA values are present, drop that label
* all : if all values are NA, drop that label
thresh : int, default None
If supplied, require this many non-NA values.
Returns
-------
DataArray
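        Examples
        --------
        Illustrative sketch (assumes ``import numpy as np``; repr output
        omitted):
        >>> da = xr.DataArray([0., np.nan, 2.], dims='x')
        >>> dropped = da.dropna('x')      # the label holding NaN is removed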
"""
ds = self._to_temp_dataset().dropna(dim, how=how, thresh=thresh)
return self._from_temp_dataset(ds)
def fillna(self, value):
"""Fill missing values in this object.
This operation follows the normal broadcasting and alignment rules that
xarray uses for binary arithmetic, except the result is aligned to this
object (``join='left'``) instead of aligned to the intersection of
index coordinates (``join='inner'``).
Parameters
----------
value : scalar, ndarray or DataArray
Used to fill all matching missing values in this array. If the
argument is a DataArray, it is first aligned with (reindexed to)
this array.
Returns
-------
DataArray
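        Examples
        --------
        Illustrative sketch (assumes ``import numpy as np``; repr output
        omitted):
        >>> da = xr.DataArray([0., np.nan, 2.], dims='x')
        >>> filled = da.fillna(-1.)       # values become [0., -1., 2.]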
"""
if utils.is_dict_like(value):
raise TypeError('cannot provide fill value as a dictionary with '
'fillna on a DataArray')
out = ops.fillna(self, value)
return out
def interpolate_na(self, dim=None, method='linear', limit=None,
use_coordinate=True,
**kwargs):
"""Interpolate values according to different methods.
Parameters
----------
dim : str
Specifies the dimension along which to interpolate.
method : {'linear', 'nearest', 'zero', 'slinear', 'quadratic', 'cubic',
'polynomial', 'barycentric', 'krog', 'pchip',
'spline', 'akima'}, optional
String indicating which method to use for interpolation:
- 'linear': linear interpolation (Default). Additional keyword
arguments are passed to ``numpy.interp``
- 'nearest', 'zero', 'slinear', 'quadratic', 'cubic',
'polynomial': are passed to ``scipy.interpolate.interp1d``. If
method=='polynomial', the ``order`` keyword argument must also be
provided.
            - 'barycentric', 'krog', 'pchip', 'spline', and 'akima': use their
              respective ``scipy.interpolate`` classes.
use_coordinate : boolean or str, default True
Specifies which index to use as the x values in the interpolation
            formulated as `y = f(x)`. If False, values are treated as if
            equally-spaced along `dim`. If True, the IndexVariable `dim` is
            used. If use_coordinate is a string, it specifies the name of a
            coordinate variable to use as the index.
limit : int, default None
Maximum number of consecutive NaNs to fill. Must be greater than 0
or None for no limit.
Returns
-------
DataArray
See also
--------
numpy.interp
scipy.interpolate
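        Examples
        --------
        Illustrative sketch (assumes ``import numpy as np``; repr output
        omitted):
        >>> da = xr.DataArray([0., np.nan, 2., np.nan, 4.], dims='x',
        ...                   coords={'x': [0, 1, 2, 3, 4]})
        >>> filled = da.interpolate_na(dim='x')   # linear: [0., 1., 2., 3., 4.]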
"""
from .missing import interp_na
return interp_na(self, dim=dim, method=method, limit=limit,
use_coordinate=use_coordinate, **kwargs)
def ffill(self, dim, limit=None):
        '''Fill NaN values by propagating values forward
*Requires bottleneck.*
Parameters
----------
dim : str
Specifies the dimension along which to propagate values when
filling.
limit : int, default None
The maximum number of consecutive NaN values to forward fill. In
other words, if there is a gap with more than this number of
consecutive NaNs, it will only be partially filled. Must be greater
than 0 or None for no limit.
Returns
-------
DataArray
'''
from .missing import ffill
return ffill(self, dim, limit=limit)
def bfill(self, dim, limit=None):
        '''Fill NaN values by propagating values backward
*Requires bottleneck.*
Parameters
----------
dim : str
Specifies the dimension along which to propagate values when
filling.
limit : int, default None
The maximum number of consecutive NaN values to backward fill. In
other words, if there is a gap with more than this number of
consecutive NaNs, it will only be partially filled. Must be greater
than 0 or None for no limit.
Returns
-------
DataArray
'''
from .missing import bfill
return bfill(self, dim, limit=limit)
def combine_first(self, other):
"""Combine two DataArray objects, with union of coordinates.
This operation follows the normal broadcasting and alignment rules of
        ``join='outer'``. Defaults to the non-null values of the array
        calling the method. Use np.nan to fill in vacant cells after
        alignment.
Parameters
----------
other : DataArray
Used to fill all matching missing values in this array.
Returns
-------
DataArray
"""
return ops.fillna(self, other, join="outer")
def reduce(self, func, dim=None, axis=None, keep_attrs=False, **kwargs):
"""Reduce this array by applying `func` along some dimension(s).
Parameters
----------
func : function
Function which can be called in the form
`f(x, axis=axis, **kwargs)` to return the result of reducing an
np.ndarray over an integer valued axis.
dim : str or sequence of str, optional
Dimension(s) over which to apply `func`.
axis : int or sequence of int, optional
Axis(es) over which to repeatedly apply `func`. Only one of the
'dim' and 'axis' arguments can be supplied. If neither are
supplied, then the reduction is calculated over the flattened array
(by calling `f(x)` without an axis argument).
keep_attrs : bool, optional
If True, the variable's attributes (`attrs`) will be copied from
the original object to the new one. If False (default), the new
object will be returned without attributes.
**kwargs : dict
Additional keyword arguments passed on to `func`.
Returns
-------
reduced : DataArray
DataArray with this object's array replaced with an array with
summarized data and the indicated dimension(s) removed.
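        Examples
        --------
        Illustrative sketch (assumes ``import numpy as np``; repr output
        omitted):
        >>> da = xr.DataArray(np.arange(6).reshape(2, 3), dims=('x', 'y'))
        >>> means = da.reduce(np.mean, dim='y')   # result has dims ('x',)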
"""
var = self.variable.reduce(func, dim, axis, keep_attrs, **kwargs)
return self._replace_maybe_drop_dims(var)
def to_pandas(self):
"""Convert this array into a pandas object with the same shape.
The type of the returned object depends on the number of DataArray
dimensions:
* 1D -> `pandas.Series`
* 2D -> `pandas.DataFrame`
* 3D -> `pandas.Panel`
Only works for arrays with 3 or fewer dimensions.
The DataArray constructor performs the inverse transformation.
"""
# TODO: consolidate the info about pandas constructors and the
# attributes that correspond to their indexes into a separate module?
constructors = {0: lambda x: x,
1: pd.Series,
2: pd.DataFrame,
3: pd.Panel}
try:
constructor = constructors[self.ndim]
except KeyError:
raise ValueError('cannot convert arrays with %s dimensions into '
'pandas objects' % self.ndim)
indexes = [self.get_index(dim) for dim in self.dims]
return constructor(self.values, *indexes)
def to_dataframe(self, name=None):
"""Convert this array and its coordinates into a tidy pandas.DataFrame.
The DataFrame is indexed by the Cartesian product of index coordinates
(in the form of a :py:class:`pandas.MultiIndex`).
Other coordinates are included as columns in the DataFrame.
"""
if name is None:
name = self.name
if name is None:
raise ValueError('cannot convert an unnamed DataArray to a '
'DataFrame: use the ``name`` parameter')
dims = OrderedDict(zip(self.dims, self.shape))
# By using a unique name, we can convert a DataArray into a DataFrame
# even if it shares a name with one of its coordinates.
# I would normally use unique_name = object() but that results in a
# dataframe with columns in the wrong order, for reasons I have not
# been able to debug (possibly a pandas bug?).
unique_name = '__unique_name_identifier_z98xfz98xugfg73ho__'
ds = self._to_dataset_whole(name=unique_name)
df = ds._to_dataframe(dims)
df.columns = [name if c == unique_name else c
for c in df.columns]
return df
def to_series(self):
"""Convert this array into a pandas.Series.
The Series is indexed by the Cartesian product of index coordinates
(in the form of a :py:class:`pandas.MultiIndex`).
"""
index = self.coords.to_index()
return pd.Series(self.values.reshape(-1), index=index, name=self.name)
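# A quick sketch of the pandas conversions above, assuming xarray and pandas
# are installed; the array name 'demo' is arbitrary.
import xarray as xr

arr = xr.DataArray([[1, 2], [3, 4]],
                   coords=[('x', ['a', 'b']), ('y', [10, 20])],
                   name='demo')
df = arr.to_dataframe()   # DataFrame indexed by an (x, y) MultiIndex, column 'demo'
s = arr.to_series()       # the same values as a MultiIndexed Series named 'demo'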
def to_masked_array(self, copy=True):
"""Convert this array into a numpy.ma.MaskedArray
Parameters
----------
copy : bool
If True (default) make a copy of the array in the result. If False,
a MaskedArray view of DataArray.values is returned.
Returns
-------
result : MaskedArray
Masked where invalid values (nan or inf) occur.
"""
isnull = pd.isnull(self.values)
return np.ma.MaskedArray(data=self.values, mask=isnull, copy=copy)
def to_netcdf(self, *args, **kwargs):
"""Write DataArray contents to a netCDF file.
Parameters
----------
path : str or Path, optional
Path to which to save this dataset. If no path is provided, this
function returns the resulting netCDF file as a bytes object; in
this case, we need to use scipy.io.netcdf, which does not support
netCDF version 4 (the default format becomes NETCDF3_64BIT).
mode : {'w', 'a'}, optional
Write ('w') or append ('a') mode. If mode='w', any existing file at
this location will be overwritten.
format : {'NETCDF4', 'NETCDF4_CLASSIC', 'NETCDF3_64BIT',
'NETCDF3_CLASSIC'}, optional
File format for the resulting netCDF file:
* NETCDF4: Data is stored in an HDF5 file, using netCDF4 API
features.
* NETCDF4_CLASSIC: Data is stored in an HDF5 file, using only
netCDF 3 compatible API features.
* NETCDF3_64BIT: 64-bit offset version of the netCDF 3 file format,
which fully supports 2+ GB files, but is only compatible with
clients linked against netCDF version 3.6.0 or later.
* NETCDF3_CLASSIC: The classic netCDF 3 file format. It does not
handle 2+ GB files very well.
All formats are supported by the netCDF4-python library.
scipy.io.netcdf only supports the last two formats.
The default format is NETCDF4 if you are saving a file to disk and
have the netCDF4-python library available. Otherwise, xarray falls
back to using scipy to write netCDF files and defaults to the
NETCDF3_64BIT format (scipy does not support netCDF4).
group : str, optional
Path to the netCDF4 group in the given file to open (only works for
format='NETCDF4'). The group(s) will be created if necessary.
engine : {'netcdf4', 'scipy', 'h5netcdf'}, optional
Engine to use when writing netCDF files. If not provided, the
default engine is chosen based on available dependencies, with a
preference for 'netcdf4' if writing to a file on disk.
encoding : dict, optional
Nested dictionary with variable names as keys and dictionaries of
variable specific encodings as values, e.g.,
``{'my_variable': {'dtype': 'int16', 'scale_factor': 0.1,
'zlib': True}, ...}``
Notes
-----
Only xarray.Dataset objects can be written to netCDF files, so
the xarray.DataArray is converted to a xarray.Dataset object
containing a single variable. If the DataArray has no name, or if the
name is the same as a coordinate name, then it is given the name
'__xarray_dataarray_variable__'.
All parameters are passed directly to `xarray.Dataset.to_netcdf`.
"""
from ..backends.api import DATAARRAY_NAME, DATAARRAY_VARIABLE
if self.name is None:
# If no name is set then use a generic xarray name
dataset = self.to_dataset(name=DATAARRAY_VARIABLE)
elif self.name in self.coords or self.name in self.dims:
# The name is the same as one of the coords names, which netCDF
# doesn't support, so rename it but keep track of the old name
dataset = self.to_dataset(name=DATAARRAY_VARIABLE)
dataset.attrs[DATAARRAY_NAME] = self.name
else:
# No problems with the name - so we're fine!
dataset = self.to_dataset()
return dataset.to_netcdf(*args, **kwargs)
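# A hedged sketch of writing a DataArray to disk and reading it back, assuming
# the netCDF4 or scipy backend is available; 'example.nc' is a hypothetical path.
import xarray as xr

arr = xr.DataArray([1, 2, 3], dims='x', name='temperature')
arr.to_netcdf('example.nc')                  # stored as a single-variable Dataset
restored = xr.open_dataarray('example.nc')   # recovers the DataArray by name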
def to_dict(self):
"""
Convert this xarray.DataArray into a dictionary following xarray
naming conventions.
Converts all variables and attributes to native Python objects.
Useful for converting to json. To avoid datetime incompatibility,
use the decode_times=False kwarg in xarray.open_dataset.
See also
--------
DataArray.from_dict
"""
d = {'coords': {}, 'attrs': decode_numpy_dict_values(self.attrs),
'dims': self.dims}
for k in self.coords:
data = ensure_us_time_resolution(self[k].values).tolist()
d['coords'].update({
k: {'data': data,
'dims': self[k].dims,
'attrs': decode_numpy_dict_values(self[k].attrs)}})
d.update({'data': ensure_us_time_resolution(self.values).tolist(),
'name': self.name})
return d
@classmethod
def from_dict(cls, d):
"""
Convert a dictionary into an xarray.DataArray
Input dict can take several forms::
d = {'dims': ('t'), 'data': x}
d = {'coords': {'t': {'dims': 't', 'data': t,
'attrs': {'units':'s'}}},
'attrs': {'title': 'air temperature'},
'dims': 't',
'data': x,
'name': 'a'}
where 't' is the name of the dimension, 'a' is the name of the array,
and x and t are lists, numpy.arrays, or pandas objects.
Parameters
----------
d : dict, with a minimum structure of {'dims': [..], 'data': [..]}
Returns
-------
obj : xarray.DataArray
See also
--------
DataArray.to_dict
Dataset.from_dict
"""
coords = None
if 'coords' in d:
try:
coords = OrderedDict([(k, (v['dims'],
v['data'],
v.get('attrs')))
for k, v in d['coords'].items()])
except KeyError as e:
raise ValueError(
"cannot convert dict when coords are missing the key "
"'{dims_data}'".format(dims_data=str(e.args[0])))
try:
data = d['data']
except KeyError:
raise ValueError("cannot convert dict without the key 'data''")
else:
obj = cls(data, coords, d.get('dims'), d.get('name'),
d.get('attrs'))
return obj
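# A minimal round-trip sketch for to_dict/from_dict, assuming xarray is
# installed; the dict follows the structure documented above.
import xarray as xr

d = {'dims': ('t',),
     'data': [10, 20, 30],
     'coords': {'t': {'dims': 't', 'data': [0, 1, 2]}},
     'name': 'a'}
arr = xr.DataArray.from_dict(d)
assert arr.to_dict()['data'] == [10, 20, 30]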
@classmethod
def from_series(cls, series):
"""Convert a pandas.Series into an xarray.DataArray.
If the series's index is a MultiIndex, it will be expanded into a
tensor product of one-dimensional coordinates (filling in missing
values with NaN). Thus this operation should be the inverse of the
`to_series` method.
"""
# TODO: add a 'name' parameter
name = series.name
df = pd.DataFrame({name: series})
ds = Dataset.from_dataframe(df)
return ds[name]
def to_cdms2(self):
"""Convert this array into a cdms2.Variable
"""
from ..convert import to_cdms2
return to_cdms2(self)
@classmethod
def from_cdms2(cls, variable):
"""Convert a cdms2.Variable into an xarray.DataArray
"""
from ..convert import from_cdms2
return from_cdms2(variable)
def to_iris(self):
"""Convert this array into a iris.cube.Cube
"""
from ..convert import to_iris
return to_iris(self)
@classmethod
def from_iris(cls, cube):
"""Convert a iris.cube.Cube into an xarray.DataArray
"""
from ..convert import from_iris
return from_iris(cube)
def _all_compat(self, other, compat_str):
"""Helper function for equals and identical"""
def compat(x, y):
return getattr(x.variable, compat_str)(y.variable)
return (utils.dict_equiv(self.coords, other.coords, compat=compat) and
compat(self, other))
def broadcast_equals(self, other):
"""Two DataArrays are broadcast equal if they are equal after
broadcasting them against each other such that they have the same
dimensions.
See Also
--------
DataArray.equals
DataArray.identical
"""
try:
return self._all_compat(other, 'broadcast_equals')
except (TypeError, AttributeError):
return False
def equals(self, other):
"""True if two DataArrays have the same dimensions, coordinates and
values; otherwise False.
DataArrays can still be equal (like pandas objects) if they have NaN
values in the same locations.
This method is necessary because `v1 == v2` for ``DataArray``
does element-wise comparisons (like numpy.ndarrays).
See Also
--------
DataArray.broadcast_equals
DataArray.identical
"""
try:
return self._all_compat(other, 'equals')
except (TypeError, AttributeError):
return False
def identical(self, other):
"""Like equals, but also checks the array name and attributes, and
attributes on all coordinates.
See Also
--------
DataArray.broadcast_equals
DataArray.equals
"""
try:
return (self.name == other.name and
self._all_compat(other, 'identical'))
except (TypeError, AttributeError):
return False
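# A short sketch contrasting the three comparison methods above, assuming
# xarray is installed; the arrays are illustrative.
import xarray as xr

a = xr.DataArray([1, 2], dims='x', name='a', attrs={'units': 'm'})
b = xr.DataArray([1, 2], dims='x', name='a')
a.equals(b)             # True: same dimensions, coordinates and values
a.identical(b)          # False: attributes differ
a.broadcast_equals(b)   # True: equal arrays are trivially broadcast-equal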
__default_name = object()
def _result_name(self, other=None):
# use the same naming heuristics as pandas:
# https://github.com/ContinuumIO/blaze/issues/458#issuecomment-51936356
other_name = getattr(other, 'name', self.__default_name)
if other_name is self.__default_name or other_name == self.name:
return self.name
else:
return None
def __array_wrap__(self, obj, context=None):
new_var = self.variable.__array_wrap__(obj, context)
return self._replace(new_var)
@staticmethod
def _unary_op(f):
@functools.wraps(f)
def func(self, *args, **kwargs):
with np.errstate(all='ignore'):
return self.__array_wrap__(f(self.variable.data, *args,
**kwargs))
return func
@staticmethod
def _binary_op(f, reflexive=False, join=None, **ignored_kwargs):
@functools.wraps(f)
def func(self, other):
if isinstance(other, (Dataset, groupby.GroupBy)):
return NotImplemented
if hasattr(other, 'indexes'):
align_type = (OPTIONS['arithmetic_join']
if join is None else join)
self, other = align(self, other, join=align_type, copy=False)
other_variable = getattr(other, 'variable', other)
other_coords = getattr(other, 'coords', None)
variable = (f(self.variable, other_variable)
if not reflexive
else f(other_variable, self.variable))
coords = self.coords._merge_raw(other_coords)
name = self._result_name(other)
return self._replace(variable, coords, name)
return func
@staticmethod
def _inplace_binary_op(f):
@functools.wraps(f)
def func(self, other):
if isinstance(other, groupby.GroupBy):
raise TypeError('in-place operations between a DataArray and '
'a grouped object are not permitted')
# n.b. we can't align other to self (with other.reindex_like(self))
# because `other` may be converted into floats, which would cause
# in-place arithmetic to fail unpredictably. Instead, we simply
# don't support automatic alignment with in-place arithmetic.
other_coords = getattr(other, 'coords', None)
other_variable = getattr(other, 'variable', other)
with self.coords._merge_inplace(other_coords):
f(self.variable, other_variable)
return self
return func
def _copy_attrs_from(self, other):
self.attrs = other.attrs
@property
def plot(self):
"""
Access plotting functions
>>> d = DataArray([[1, 2], [3, 4]])
For convenience just call this directly
>>> d.plot()
Or use it as a namespace to use xarray.plot functions as
DataArray methods
>>> d.plot.imshow() # equivalent to xarray.plot.imshow(d)
"""
return _PlotMethods(self)
def _title_for_slice(self, truncate=50):
"""
If the dataarray has 1 dimensional coordinates or comes from a slice
we can show that info in the title
Parameters
----------
truncate : integer
maximum number of characters for title
Returns
-------
title : string
Can be used for plot titles
"""
one_dims = []
for dim, coord in iteritems(self.coords):
if coord.size == 1:
one_dims.append('{dim} = {v}'.format(
dim=dim, v=format_item(coord.values)))
title = ', '.join(one_dims)
if len(title) > truncate:
title = title[:(truncate - 3)] + '...'
return title
def diff(self, dim, n=1, label='upper'):
"""Calculate the n-th order discrete difference along given axis.
Parameters
----------
dim : str, optional
Dimension over which to calculate the finite difference.
n : int, optional
The number of times values are differenced.
label : str, optional
The new coordinate in dimension ``dim`` will have the
values of either the minuend's or subtrahend's coordinate
for values 'upper' and 'lower', respectively. Other
values are not supported.
Returns
-------
difference : same type as caller
The n-th order finite difference of this object.
Examples
--------
>>> arr = xr.DataArray([5, 5, 6, 6], [[1, 2, 3, 4]], ['x'])
>>> arr.diff('x')
<xarray.DataArray (x: 3)>
array([0, 1, 0])
Coordinates:
* x (x) int64 2 3 4
>>> arr.diff('x', 2)
<xarray.DataArray (x: 2)>
array([ 1, -1])
Coordinates:
* x (x) int64 3 4
"""
ds = self._to_temp_dataset().diff(n=n, dim=dim, label=label)
return self._from_temp_dataset(ds)
def shift(self, **shifts):
"""Shift this array by an offset along one or more dimensions.
Only the data is moved; coordinates stay in place. Values shifted from
beyond array bounds are replaced by NaN. This is consistent with the
behavior of ``shift`` in pandas.
Parameters
----------
**shifts : keyword arguments of the form {dim: offset}
Integer offset to shift along each of the given dimensions.
Positive offsets shift to the right; negative offsets shift to the
left.
Returns
-------
shifted : DataArray
DataArray with the same coordinates and attributes but shifted
data.
See also
--------
roll
Examples
--------
>>> arr = xr.DataArray([5, 6, 7], dims='x')
>>> arr.shift(x=1)
<xarray.DataArray (x: 3)>
array([ nan, 5., 6.])
Coordinates:
* x (x) int64 0 1 2
"""
variable = self.variable.shift(**shifts)
return self._replace(variable)
def roll(self, **shifts):
"""Roll this array by an offset along one or more dimensions.
Unlike shift, roll rotates all variables, including coordinates. The
direction of rotation is consistent with :py:func:`numpy.roll`.
Parameters
----------
**shifts : keyword arguments of the form {dim: offset}
Integer offset to rotate each of the given dimensions. Positive
offsets roll to the right; negative offsets roll to the left.
Returns
-------
rolled : DataArray
DataArray with the same attributes but rolled data and coordinates.
See also
--------
shift
Examples
--------
>>> arr = xr.DataArray([5, 6, 7], dims='x')
>>> arr.roll(x=1)
<xarray.DataArray (x: 3)>
array([7, 5, 6])
Coordinates:
* x (x) int64 2 0 1
"""
ds = self._to_temp_dataset().roll(**shifts)
return self._from_temp_dataset(ds)
@property
def real(self):
return self._replace(self.variable.real)
@property
def imag(self):
return self._replace(self.variable.imag)
def dot(self, other, dims=None):
"""Perform dot product of two DataArrays along their shared dims.
Equivalent to taking tensordot over all shared dims.
Parameters
----------
other : DataArray
The other array with which the dot product is performed.
dims: list of strings, optional
The dimensions along which the dot product is summed. By default, all
the common dimensions are summed over.
Returns
-------
result : DataArray
Array resulting from the dot product over all shared dimensions.
See also
--------
dot
numpy.tensordot
Examples
--------
>>> da_vals = np.arange(6 * 5 * 4).reshape((6, 5, 4))
>>> da = DataArray(da_vals, dims=['x', 'y', 'z'])
>>> dm_vals = np.arange(4)
>>> dm = DataArray(dm_vals, dims=['z'])
>>> dm.dims
('z',)
>>> da.dims
('x', 'y', 'z')
>>> dot_result = da.dot(dm)
>>> dot_result.dims
('x', 'y')
"""
if isinstance(other, Dataset):
raise NotImplementedError('dot products are not yet supported '
'with Dataset objects.')
if not isinstance(other, DataArray):
raise TypeError('dot only operates on DataArrays.')
return computation.dot(self, other, dims=dims)
def sortby(self, variables, ascending=True):
"""
Sort object by labels or values (along an axis).
Sorts the dataarray, either along specified dimensions,
or according to values of 1-D dataarrays that share dimension
with calling object.
If the input variables are dataarrays, then the dataarrays are aligned
(via left-join) to the calling object prior to sorting by cell values.
NaNs are sorted to the end, following Numpy convention.
If multiple sorts along the same dimension are
given, numpy's lexsort is performed along that dimension:
https://docs.scipy.org/doc/numpy/reference/generated/numpy.lexsort.html
and the FIRST key in the sequence is used as the primary sort key,
followed by the 2nd key, etc.
Parameters
----------
variables: str, DataArray, or list of either
1D DataArray objects or name(s) of 1D variable(s) in
coords whose values are used to sort this array.
ascending: boolean, optional
Whether to sort by ascending or descending order.
Returns
-------
sorted: DataArray
A new dataarray where all the specified dims are sorted by dim
labels.
Examples
--------
>>> da = xr.DataArray(np.random.rand(5),
... coords=[pd.date_range('1/1/2000', periods=5)],
... dims='time')
>>> da
<xarray.DataArray (time: 5)>
array([ 0.965471, 0.615637, 0.26532 , 0.270962, 0.552878])
Coordinates:
* time (time) datetime64[ns] 2000-01-01 2000-01-02 2000-01-03 ...
>>> da.sortby(da)
<xarray.DataArray (time: 5)>
array([ 0.26532 , 0.270962, 0.552878, 0.615637, 0.965471])
Coordinates:
* time (time) datetime64[ns] 2000-01-03 2000-01-04 2000-01-05 ...
"""
ds = self._to_temp_dataset().sortby(variables, ascending=ascending)
return self._from_temp_dataset(ds)
def quantile(self, q, dim=None, interpolation='linear', keep_attrs=False):
"""Compute the qth quantile of the data along the specified dimension.
Returns the qth quantiles(s) of the array elements.
Parameters
----------
q : float in range of [0,1] (or sequence of floats)
Quantile to compute, which must be between 0 and 1 inclusive.
dim : str or sequence of str, optional
Dimension(s) over which to apply quantile.
interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
This optional parameter specifies the interpolation method to
use when the desired quantile lies between two data points
``i < j``:
* linear: ``i + (j - i) * fraction``, where ``fraction`` is
the fractional part of the index surrounded by ``i`` and
``j``.
* lower: ``i``.
* higher: ``j``.
* nearest: ``i`` or ``j``, whichever is nearest.
* midpoint: ``(i + j) / 2``.
keep_attrs : bool, optional
If True, the dataset's attributes (`attrs`) will be copied from
the original object to the new one. If False (default), the new
object will be returned without attributes.
Returns
-------
quantiles : DataArray
If `q` is a single quantile, then the result
is a scalar. If multiple percentiles are given, first axis of
the result corresponds to the quantile and a quantile dimension
is added to the return array. The other dimensions are the
dimensions that remain after the reduction of the array.
See Also
--------
numpy.nanpercentile, pandas.Series.quantile, Dataset.quantile
"""
ds = self._to_temp_dataset().quantile(
q, dim=dim, keep_attrs=keep_attrs, interpolation=interpolation)
return self._from_temp_dataset(ds)
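# A brief sketch of quantile, assuming xarray is installed; the data are
# illustrative.
import numpy as np
import xarray as xr

arr = xr.DataArray(np.arange(12.0).reshape(3, 4), dims=('x', 'y'))
median_y = arr.quantile(0.5, dim='y')            # scalar q: result keeps only dim 'x'
quartiles = arr.quantile([0.25, 0.75], dim='x')  # sequence q: adds a 'quantile' dimension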
def rank(self, dim, pct=False, keep_attrs=False):
"""Ranks the data.
Equal values are assigned a rank that is the average of the ranks that
would have been otherwise assigned to all of the values within that
set. Ranks begin at 1, not 0. If pct, computes percentage ranks.
NaNs in the input array are returned as NaNs.
The `bottleneck` library is required.
Parameters
----------
dim : str
Dimension over which to compute rank.
pct : bool, optional
If True, compute percentage ranks, otherwise compute integer ranks.
keep_attrs : bool, optional
If True, the dataset's attributes (`attrs`) will be copied from
the original object to the new one. If False (default), the new
object will be returned without attributes.
Returns
-------
ranked : DataArray
DataArray with the same coordinates and dtype 'float64'.
Examples
--------
>>> arr = xr.DataArray([5, 6, 7], dims='x')
>>> arr.rank('x')
<xarray.DataArray (x: 3)>
array([ 1., 2., 3.])
Dimensions without coordinates: x
"""
ds = self._to_temp_dataset().rank(dim, pct=pct, keep_attrs=keep_attrs)
return self._from_temp_dataset(ds)
# priority must be higher than Variable to properly work with binary ufuncs
ops.inject_all_ops_and_reduce_methods(DataArray, priority=60)
| {
"repo_name": "jcmgray/xarray",
"path": "xarray/core/dataarray.py",
"copies": "1",
"size": "77486",
"license": "apache-2.0",
"hash": -824658212618687600,
"line_mean": 35.3784037559,
"line_max": 79,
"alpha_frac": 0.573639109,
"autogenerated": false,
"ratio": 4.540106638542216,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5613745747542217,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import functools
from ._compat import new_class
from ._make import _make_ne
_operation_names = {"eq": "==", "lt": "<", "le": "<=", "gt": ">", "ge": ">="}
def cmp_using(
eq=None,
lt=None,
le=None,
gt=None,
ge=None,
require_same_type=True,
class_name="Comparable",
):
"""
Create a class that can be passed into `attr.ib`'s ``eq``, ``order``, and
``cmp`` arguments to customize field comparison.
The resulting class will have a full set of ordering methods if
at least one of ``{lt, le, gt, ge}`` and ``eq`` are provided.
:param Optional[callable] eq: `callable` used to evaluate equality
of two objects.
:param Optional[callable] lt: `callable` used to evaluate whether
one object is less than another object.
:param Optional[callable] le: `callable` used to evaluate whether
one object is less than or equal to another object.
:param Optional[callable] gt: `callable` used to evaluate whether
one object is greater than another object.
:param Optional[callable] ge: `callable` used to evaluate whether
one object is greater than or equal to another object.
:param bool require_same_type: When `True`, equality and ordering methods
will return `NotImplemented` if objects are not of the same type.
:param Optional[str] class_name: Name of class. Defaults to 'Comparable'.
See `comparison` for more details.
.. versionadded:: 21.1.0
"""
body = {
"__slots__": ["value"],
"__init__": _make_init(),
"_requirements": [],
"_is_comparable_to": _is_comparable_to,
}
# Add operations.
num_order_functions = 0
has_eq_function = False
if eq is not None:
has_eq_function = True
body["__eq__"] = _make_operator("eq", eq)
body["__ne__"] = _make_ne()
if lt is not None:
num_order_functions += 1
body["__lt__"] = _make_operator("lt", lt)
if le is not None:
num_order_functions += 1
body["__le__"] = _make_operator("le", le)
if gt is not None:
num_order_functions += 1
body["__gt__"] = _make_operator("gt", gt)
if ge is not None:
num_order_functions += 1
body["__ge__"] = _make_operator("ge", ge)
type_ = new_class(class_name, (object,), {}, lambda ns: ns.update(body))
# Add same type requirement.
if require_same_type:
type_._requirements.append(_check_same_type)
# Add total ordering if at least one operation was defined.
if 0 < num_order_functions < 4:
if not has_eq_function:
# functools.total_ordering requires __eq__ to be defined,
# so raise early error here to keep a nice stack.
raise ValueError(
"eq must be define is order to complete ordering from "
"lt, le, gt, ge."
)
type_ = functools.total_ordering(type_)
return type_
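# A hedged usage sketch of cmp_using (assumes attrs >= 21.1.0). In practice the
# generated class is normally passed to attr.ib's ``eq``/``order`` arguments;
# instantiating it directly, as below, just makes the comparison semantics visible.
from attr import cmp_using

FuzzyFloat = cmp_using(eq=lambda a, b: abs(a - b) < 1e-9,
                       lt=lambda a, b: a < b,
                       class_name='FuzzyFloat')

assert FuzzyFloat(1.0) == FuzzyFloat(1.0 + 1e-12)  # custom eq with tolerance
assert FuzzyFloat(1.0) <= FuzzyFloat(2.0)          # __le__ filled in by functools.total_ordering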
def _make_init():
"""
Create __init__ method.
"""
def __init__(self, value):
"""
Initialize object with *value*.
"""
self.value = value
return __init__
def _make_operator(name, func):
"""
Create operator method.
"""
def method(self, other):
if not self._is_comparable_to(other):
return NotImplemented
result = func(self.value, other.value)
if result is NotImplemented:
return NotImplemented
return result
method.__name__ = "__%s__" % (name,)
method.__doc__ = "Return a %s b. Computed by attrs." % (
_operation_names[name],
)
return method
def _is_comparable_to(self, other):
"""
Check whether `other` is comparable to `self`.
"""
for func in self._requirements:
if not func(self, other):
return False
return True
def _check_same_type(self, other):
"""
Return True if *self* and *other* are of the same type, False otherwise.
"""
return other.value.__class__ is self.value.__class__
| {
"repo_name": "python-attrs/attrs",
"path": "src/attr/_cmp.py",
"copies": "2",
"size": "4133",
"license": "mit",
"hash": -8286033524124914000,
"line_mean": 26.1907894737,
"line_max": 77,
"alpha_frac": 0.5831115413,
"autogenerated": false,
"ratio": 3.8843984962406015,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5467510037540602,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import functools
from toolz import merge
from multipledispatch import Dispatcher
from .convert import convert
from .append import append
from .resource import resource
from .utils import ignoring
import datashape
from datashape import discover
from datashape.dispatch import namespace
from datashape.predicates import isdimension
from .compatibility import unicode
from pandas import DataFrame, Series
from numpy import ndarray
not_appendable_types = DataFrame, Series, ndarray, tuple
__all__ = 'into',
if 'into' not in namespace:
namespace['into'] = Dispatcher('into')
into = namespace['into']
def validate(f):
@functools.wraps(f)
def wrapped(*args, **kwargs):
dshape = kwargs.pop('dshape', None)
if isinstance(dshape, (str, unicode)):
dshape = datashape.dshape(dshape)
if dshape is not None and not isinstance(dshape, datashape.DataShape):
raise TypeError('dshape argument is not an instance of DataShape')
kwargs['dshape'] = dshape
return f(*args, **kwargs)
return wrapped
@into.register(type, object)
@validate
def into_type(a, b, dshape=None, **kwargs):
with ignoring(NotImplementedError):
if dshape is None:
dshape = discover(b)
return convert(a, b, dshape=dshape, **kwargs)
@into.register(object, object)
@validate
def into_object(target, source, dshape=None, **kwargs):
""" Push one dataset into another
Parameters
----------
source: object or string
The source of your data. Either an object (e.g. a DataFrame)
or a string URI (e.g. 'myfile.csv').
target: object or string or type
The target for where you want your data to go.
Either an object (e.g. []), a type (e.g. list),
or a string (e.g. 'postgresql://hostname::tablename').
raise_on_errors: bool (optional, defaults to False)
Raise exceptions rather than reroute around them
**kwargs:
keyword arguments to pass through to conversion functions.
Examples
--------
>>> L = into(list, (1, 2, 3)) # Convert things into new things
>>> L
[1, 2, 3]
>>> _ = into(L, (4, 5, 6)) # Append things onto existing things
>>> L
[1, 2, 3, 4, 5, 6]
>>> into('myfile.csv', [('Alice', 1), ('Bob', 2)]) # doctest: +SKIP
Explanation
-----------
We can specify data with a Python object like a ``list``, ``DataFrame``,
``sqlalchemy.Table``, ``h5py.Dataset``, etc..
We can specify data with a string URI like ``'myfile.csv'``,
``'myfiles.*.json'`` or ``'sqlite:///data.db::tablename'``. These are
matched by regular expression. See the ``resource`` function for more
details on string URIs.
We can optionally specify datatypes with the ``dshape=`` keyword, providing
a datashape. This allows us to be explicit about types when mismatches
occur or when our data doesn't hold the whole picture. See the
``discover`` function for more information on ``dshape``.
>>> ds = 'var * {name: string, balance: float64}'
>>> into('accounts.json', [('Alice', 100), ('Bob', 200)], dshape=ds) # doctest: +SKIP
We can optionally specify keyword arguments to pass down to relevant
conversion functions. For example, when converting a CSV file we might
want to specify delimiter
>>> into(list, 'accounts.csv', has_header=True, delimiter=';') # doctest: +SKIP
These keyword arguments trickle down to whatever function ``into`` uses to
convert this particular format, functions like ``pandas.read_csv``.
See Also
--------
into.resource.resource - Specify things with strings
datashape.discover - Get datashape of data
into.convert.convert - Convert things into new things
into.append.append - Add things onto existing things
"""
if isinstance(source, (str, unicode)):
source = resource(source, dshape=dshape, **kwargs)
if type(target) in not_appendable_types:
raise TypeError('target of %s type does not support in-place append' % type(target))
with ignoring(NotImplementedError):
if dshape is None:
dshape = discover(source)
return append(target, source, dshape=dshape, **kwargs)
@into.register((str, unicode), object)
@validate
def into_string(uri, b, dshape=None, **kwargs):
if dshape is None:
dshape = discover(b)
resource_ds = 0 * dshape.subshape[0] if isdimension(dshape[0]) else dshape
a = resource(uri, dshape=resource_ds, expected_dshape=dshape, **kwargs)
return into(a, b, dshape=dshape, **kwargs)
@into.register((type, (str, unicode)), (str, unicode))
@validate
def into_string_string(a, b, **kwargs):
return into(a, resource(b, **kwargs), **kwargs)
@into.register(object)
@validate
def into_curried(o, **kwargs1):
def curried_into(other, **kwargs2):
return into(o, other, **merge(kwargs2, kwargs1))
return curried_into
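# A hedged sketch of the curried, single-argument form registered above,
# assuming the odo package (which exports ``into``) is installed.
from odo import into

to_list = into(list)    # one argument -> returns a curried converter
to_list((1, 2, 3))      # -> [1, 2, 3], same as into(list, (1, 2, 3))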
| {
"repo_name": "quantopian/odo",
"path": "odo/into.py",
"copies": "5",
"size": "4965",
"license": "bsd-3-clause",
"hash": 1434546908081121000,
"line_mean": 30.6242038217,
"line_max": 92,
"alpha_frac": 0.6616314199,
"autogenerated": false,
"ratio": 3.816295157571099,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00021341659263316522,
"num_lines": 157
} |
from __future__ import absolute_import, division, print_function
import functools
import numpy as np
import pandas as pd
from . import dtypes, duck_array_ops, nputils, ops
from .arithmetic import SupportsArithmetic
from .combine import concat
from .common import ImplementsArrayReduce, ImplementsDatasetReduce
from .pycompat import integer_types, range, zip
from .utils import hashable, maybe_wrap_array, peek_at, safe_cast_to_index
from .variable import IndexVariable, Variable, as_variable
def unique_value_groups(ar, sort=True):
"""Group an array by its unique values.
Parameters
----------
ar : array-like
Input array. This will be flattened if it is not already 1-D.
sort : boolean, optional
Whether or not to sort unique values.
Returns
-------
values : np.ndarray
Sorted, unique values as returned by `np.unique`.
indices : list of lists of int
Each element provides the integer indices in `ar` with values given by
the corresponding value in `unique_values`.
"""
inverse, values = pd.factorize(ar, sort=sort)
groups = [[] for _ in range(len(values))]
for n, g in enumerate(inverse):
if g >= 0:
# pandas uses -1 to mark NaN, but doesn't include them in values
groups[g].append(n)
return values, groups
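# Illustrative call of the helper above (it relies on the pandas import at the
# top of this module); with the default sort=True the unique values come back sorted.
import numpy as np

values, groups = unique_value_groups(np.array(['b', 'a', 'b', 'a']))
# values -> array(['a', 'b'], dtype=object)
# groups -> [[1, 3], [0, 2]]   (integer positions of each unique value)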
def _dummy_copy(xarray_obj):
from .dataset import Dataset
from .dataarray import DataArray
if isinstance(xarray_obj, Dataset):
res = Dataset(dict((k, dtypes.get_fill_value(v.dtype))
for k, v in xarray_obj.data_vars.items()),
dict((k, dtypes.get_fill_value(v.dtype))
for k, v in xarray_obj.coords.items()
if k not in xarray_obj.dims),
xarray_obj.attrs)
elif isinstance(xarray_obj, DataArray):
res = DataArray(dtypes.get_fill_value(xarray_obj.dtype),
dict((k, dtypes.get_fill_value(v.dtype))
for k, v in xarray_obj.coords.items()
if k not in xarray_obj.dims),
dims=[],
name=xarray_obj.name,
attrs=xarray_obj.attrs)
else: # pragma: no cover
raise AssertionError
return res
def _is_one_or_none(obj):
return obj == 1 or obj is None
def _consolidate_slices(slices):
"""Consolidate adjacent slices in a list of slices.
"""
result = []
last_slice = slice(None)
for slice_ in slices:
if not isinstance(slice_, slice):
raise ValueError('list element is not a slice: %r' % slice_)
if (result and last_slice.stop == slice_.start and
_is_one_or_none(last_slice.step) and
_is_one_or_none(slice_.step)):
last_slice = slice(last_slice.start, slice_.stop, slice_.step)
result[-1] = last_slice
else:
result.append(slice_)
last_slice = slice_
return result
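# For example, the helper above merges touching slices and leaves gaps alone:
merged = _consolidate_slices([slice(0, 2), slice(2, 5), slice(7, 9)])
# -> [slice(0, 5, None), slice(7, 9, None)]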
def _inverse_permutation_indices(positions):
"""Like inverse_permutation, but also handles slices.
Parameters
----------
positions : list of np.ndarray or slice objects.
If slice objects, all are assumed to be slices.
Returns
-------
np.ndarray of indices or None, if no permutation is necessary.
"""
if not positions:
return None
if isinstance(positions[0], slice):
positions = _consolidate_slices(positions)
if positions == slice(None):
return None
positions = [np.arange(sl.start, sl.stop, sl.step) for sl in positions]
indices = nputils.inverse_permutation(np.concatenate(positions))
return indices
class _DummyGroup(object):
"""Class for keeping track of grouped dimensions without coordinates.
Should not be user visible.
"""
def __init__(self, obj, name, coords):
self.name = name
self.coords = coords
self.dims = (name,)
self.ndim = 1
self.size = obj.sizes[name]
self.values = range(self.size)
def _ensure_1d(group, obj):
if group.ndim != 1:
# try to stack the dims of the group into a single dim
orig_dims = group.dims
stacked_dim = 'stacked_' + '_'.join(orig_dims)
# these dimensions get created by the stack operation
inserted_dims = [dim for dim in group.dims if dim not in group.coords]
# the copy is necessary here, otherwise read only array raises error
# in pandas: https://github.com/pydata/pandas/issues/12813
group = group.stack(**{stacked_dim: orig_dims}).copy()
obj = obj.stack(**{stacked_dim: orig_dims})
else:
stacked_dim = None
inserted_dims = []
return group, obj, stacked_dim, inserted_dims
def _unique_and_monotonic(group):
if isinstance(group, _DummyGroup):
return True
else:
index = safe_cast_to_index(group)
return index.is_unique and index.is_monotonic
class GroupBy(SupportsArithmetic):
"""A object that implements the split-apply-combine pattern.
Modeled after `pandas.GroupBy`. The `GroupBy` object can be iterated over
(unique_value, grouped_array) pairs, but the main way to interact with a
groupby object are with the `apply` or `reduce` methods. You can also
directly call numpy methods like `mean` or `std`.
You should create a GroupBy object by using the `DataArray.groupby` or
`Dataset.groupby` methods.
See Also
--------
Dataset.groupby
DataArray.groupby
"""
def __init__(self, obj, group, squeeze=False, grouper=None, bins=None,
cut_kwargs={}):
"""Create a GroupBy object
Parameters
----------
obj : Dataset or DataArray
Object to group.
group : DataArray
Array with the group values.
squeeze : boolean, optional
If "group" is a coordinate of object, `squeeze` controls whether
the subarrays have a dimension of length 1 along that coordinate or
if the dimension is squeezed out.
grouper : pd.Grouper, optional
Used for grouping values along the `group` array.
bins : array-like, optional
If `bins` is specified, the groups will be discretized into the
specified bins by `pandas.cut`.
cut_kwargs : dict, optional
Extra keyword arguments to pass to `pandas.cut`
"""
from .dataarray import DataArray
if grouper is not None and bins is not None:
raise TypeError("can't specify both `grouper` and `bins`")
if not isinstance(group, (DataArray, IndexVariable)):
if not hashable(group):
raise TypeError('`group` must be an xarray.DataArray or the '
'name of an xarray variable or dimension')
group = obj[group]
if group.name not in obj.coords and group.name in obj.dims:
# DummyGroups should not appear on groupby results
group = _DummyGroup(obj, group.name, group.coords)
if getattr(group, 'name', None) is None:
raise ValueError('`group` must have a name')
group, obj, stacked_dim, inserted_dims = _ensure_1d(group, obj)
group_dim, = group.dims
expected_size = obj.sizes[group_dim]
if group.size != expected_size:
raise ValueError('the group variable\'s length does not '
'match the length of this variable along its '
'dimension')
full_index = None
if bins is not None:
binned = pd.cut(group.values, bins, **cut_kwargs)
new_dim_name = group.name + '_bins'
group = DataArray(binned, group.coords, name=new_dim_name)
full_index = binned.categories
if grouper is not None:
index = safe_cast_to_index(group)
if not index.is_monotonic:
# TODO: sort instead of raising an error
raise ValueError('index must be monotonic for resampling')
s = pd.Series(np.arange(index.size), index)
first_items = s.groupby(grouper).first()
full_index = first_items.index
if first_items.isnull().any():
first_items = first_items.dropna()
sbins = first_items.values.astype(np.int64)
group_indices = ([slice(i, j)
for i, j in zip(sbins[:-1], sbins[1:])] +
[slice(sbins[-1], None)])
unique_coord = IndexVariable(group.name, first_items.index)
elif group.dims == (group.name,) and _unique_and_monotonic(group):
# no need to factorize
group_indices = np.arange(group.size)
if not squeeze:
# use slices to do views instead of fancy indexing
# equivalent to: group_indices = group_indices.reshape(-1, 1)
group_indices = [slice(i, i + 1) for i in group_indices]
unique_coord = group
else:
# look through group to find the unique values
unique_values, group_indices = unique_value_groups(
safe_cast_to_index(group), sort=(bins is None))
unique_coord = IndexVariable(group.name, unique_values)
# specification for the groupby operation
self._obj = obj
self._group = group
self._group_dim = group_dim
self._group_indices = group_indices
self._unique_coord = unique_coord
self._stacked_dim = stacked_dim
self._inserted_dims = inserted_dims
self._full_index = full_index
# cached attributes
self._groups = None
@property
def groups(self):
# provided to mimic pandas.groupby
if self._groups is None:
self._groups = dict(zip(self._unique_coord.values,
self._group_indices))
return self._groups
def __len__(self):
return self._unique_coord.size
def __iter__(self):
return zip(self._unique_coord.values, self._iter_grouped())
def _iter_grouped(self):
"""Iterate over each element in this group"""
for indices in self._group_indices:
yield self._obj.isel(**{self._group_dim: indices})
def _infer_concat_args(self, applied_example):
if self._group_dim in applied_example.dims:
coord = self._group
positions = self._group_indices
else:
coord = self._unique_coord
positions = None
dim, = coord.dims
if isinstance(coord, _DummyGroup):
coord = None
return coord, dim, positions
@staticmethod
def _binary_op(f, reflexive=False, **ignored_kwargs):
@functools.wraps(f)
def func(self, other):
g = f if not reflexive else lambda x, y: f(y, x)
applied = self._yield_binary_applied(g, other)
combined = self._combine(applied)
return combined
return func
def _yield_binary_applied(self, func, other):
dummy = None
for group_value, obj in self:
try:
other_sel = other.sel(**{self._group.name: group_value})
except AttributeError:
raise TypeError('GroupBy objects only support binary ops '
'when the other argument is a Dataset or '
'DataArray')
except (KeyError, ValueError):
if self._group.name not in other.dims:
raise ValueError('incompatible dimensions for a grouped '
'binary operation: the group variable %r '
'is not a dimension on the other argument'
% self._group.name)
if dummy is None:
dummy = _dummy_copy(other)
other_sel = dummy
result = func(obj, other_sel)
yield result
def _maybe_restore_empty_groups(self, combined):
"""Our index contained empty groups (e.g., from a resampling). If we
reduced on that dimension, we want to restore the full index.
"""
if (self._full_index is not None and
self._group.name in combined.dims):
indexers = {self._group.name: self._full_index}
combined = combined.reindex(**indexers)
return combined
def _maybe_unstack(self, obj):
"""This gets called if we are applying on an array with a
multidimensional group."""
if self._stacked_dim is not None and self._stacked_dim in obj.dims:
obj = obj.unstack(self._stacked_dim)
for dim in self._inserted_dims:
if dim in obj.coords:
del obj.coords[dim]
return obj
def fillna(self, value):
"""Fill missing values in this object by group.
This operation follows the normal broadcasting and alignment rules that
xarray uses for binary arithmetic, except the result is aligned to this
object (``join='left'``) instead of aligned to the intersection of
index coordinates (``join='inner'``).
Parameters
----------
value : valid type for the grouped object's fillna method
Used to fill all matching missing values by group.
Returns
-------
same type as the grouped object
See also
--------
Dataset.fillna
DataArray.fillna
"""
out = ops.fillna(self, value)
return out
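# A hedged sketch of group-wise fillna, assuming xarray is installed; the
# group labels and fill values are made up for illustration.
import numpy as np
import xarray as xr

arr = xr.DataArray([np.nan, 1.0, np.nan, 2.0], dims='t',
                   coords={'label': ('t', ['a', 'a', 'b', 'b'])})
fill = xr.DataArray([10.0, 20.0], coords=[('label', ['a', 'b'])])
filled = arr.groupby('label').fillna(fill)   # -> [10., 1., 20., 2.]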
def where(self, cond, other=dtypes.NA):
"""Return elements from `self` or `other` depending on `cond`.
Parameters
----------
cond : DataArray or Dataset with boolean dtype
Locations at which to preserve this objects values.
other : scalar, DataArray or Dataset, optional
Value to use for locations in this object where ``cond`` is False.
By default, inserts missing values.
Returns
-------
same type as the grouped object
See also
--------
Dataset.where
"""
return ops.where_method(self, cond, other)
def _first_or_last(self, op, skipna, keep_attrs):
if isinstance(self._group_indices[0], integer_types):
# NB. this is currently only used for reductions along an existing
# dimension
return self._obj
return self.reduce(op, self._group_dim, skipna=skipna,
keep_attrs=keep_attrs, allow_lazy=True)
def first(self, skipna=None, keep_attrs=True):
"""Return the first element of each group along the group dimension
"""
return self._first_or_last(duck_array_ops.first, skipna, keep_attrs)
def last(self, skipna=None, keep_attrs=True):
"""Return the last element of each group along the group dimension
"""
return self._first_or_last(duck_array_ops.last, skipna, keep_attrs)
def assign_coords(self, **kwargs):
"""Assign coordinates by group.
See also
--------
Dataset.assign_coords
"""
return self.apply(lambda ds: ds.assign_coords(**kwargs))
def _maybe_reorder(xarray_obj, dim, positions):
order = _inverse_permutation_indices(positions)
if order is None:
return xarray_obj
else:
return xarray_obj[{dim: order}]
class DataArrayGroupBy(GroupBy, ImplementsArrayReduce):
"""GroupBy object specialized to grouping DataArray objects
"""
def _iter_grouped_shortcut(self):
"""Fast version of `_iter_grouped` that yields Variables without
metadata
"""
var = self._obj.variable
for indices in self._group_indices:
yield var[{self._group_dim: indices}]
def _concat_shortcut(self, applied, dim, positions=None):
# nb. don't worry too much about maintaining this method -- it does
# speed things up, but it's not very interpretable and there are much
# faster alternatives (e.g., doing the grouped aggregation in a
# compiled language)
stacked = Variable.concat(applied, dim, shortcut=True)
reordered = _maybe_reorder(stacked, dim, positions)
result = self._obj._replace_maybe_drop_dims(reordered)
return result
def _restore_dim_order(self, stacked):
def lookup_order(dimension):
if dimension == self._group.name:
dimension, = self._group.dims
if dimension in self._obj.dims:
axis = self._obj.get_axis_num(dimension)
else:
axis = 1e6 # some arbitrarily high value
return axis
new_order = sorted(stacked.dims, key=lookup_order)
return stacked.transpose(*new_order)
def apply(self, func, shortcut=False, **kwargs):
"""Apply a function over each array in the group and concatenate them
together into a new array.
`func` is called like `func(ar, *args, **kwargs)` for each array `ar`
in this group.
Apply uses heuristics (like `pandas.GroupBy.apply`) to figure out how
to stack together the array. The rule is:
1. If the dimension along which the group coordinate is defined is
still in the first grouped array after applying `func`, then stack
over this dimension.
2. Otherwise, stack over the new dimension given by name of this
grouping (the argument to the `groupby` function).
Parameters
----------
func : function
Callable to apply to each array.
shortcut : bool, optional
Whether or not to shortcut evaluation under the assumptions that:
(1) The action of `func` does not depend on any of the array
metadata (attributes or coordinates) but only on the data and
dimensions.
(2) The action of `func` creates arrays with homogeneous metadata,
that is, with the same dimensions and attributes.
If these conditions are satisfied `shortcut` provides significant
speedup. This should be the case for many common groupby operations
(e.g., applying numpy ufuncs).
**kwargs
Used to call `func(ar, **kwargs)` for each array `ar`.
Returns
-------
applied : DataArray
The result of splitting, applying and combining this array.
"""
if shortcut:
grouped = self._iter_grouped_shortcut()
else:
grouped = self._iter_grouped()
applied = (maybe_wrap_array(arr, func(arr, **kwargs))
for arr in grouped)
return self._combine(applied, shortcut=shortcut)
def _combine(self, applied, shortcut=False):
"""Recombine the applied objects like the original."""
applied_example, applied = peek_at(applied)
coord, dim, positions = self._infer_concat_args(applied_example)
if shortcut:
combined = self._concat_shortcut(applied, dim, positions)
else:
combined = concat(applied, dim)
combined = _maybe_reorder(combined, dim, positions)
if isinstance(combined, type(self._obj)):
# only restore dimension order for arrays
combined = self._restore_dim_order(combined)
if coord is not None:
if shortcut:
combined._coords[coord.name] = as_variable(coord)
else:
combined.coords[coord.name] = coord
combined = self._maybe_restore_empty_groups(combined)
combined = self._maybe_unstack(combined)
return combined
def reduce(self, func, dim=None, axis=None, keep_attrs=False,
shortcut=True, **kwargs):
"""Reduce the items in this group by applying `func` along some
dimension(s).
Parameters
----------
func : function
Function which can be called in the form
`func(x, axis=axis, **kwargs)` to return the result of collapsing
an np.ndarray over an integer valued axis.
dim : str or sequence of str, optional
Dimension(s) over which to apply `func`.
axis : int or sequence of int, optional
Axis(es) over which to apply `func`. Only one of the 'dim'
and 'axis' arguments can be supplied. If neither are supplied, then
`func` is calculated over all dimensions for each group item.
keep_attrs : bool, optional
If True, the dataset's attributes (`attrs`) will be copied from
the original object to the new one. If False (default), the new
object will be returned without attributes.
**kwargs : dict
Additional keyword arguments passed on to `func`.
Returns
-------
reduced : Array
Array with summarized data and the indicated dimension(s)
removed.
"""
def reduce_array(ar):
return ar.reduce(func, dim, axis, keep_attrs=keep_attrs, **kwargs)
return self.apply(reduce_array, shortcut=shortcut)
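# A compact sketch of the split-apply-combine flow implemented above, assuming
# xarray is installed; the coordinate name 'label' is arbitrary.
import numpy as np
import xarray as xr

arr = xr.DataArray(np.arange(6.0), dims='t',
                   coords={'label': ('t', ['a', 'a', 'b', 'b', 'c', 'c'])})
grouped = arr.groupby('label')
means = grouped.mean('t')                         # one value per group, new dim 'label'
demeaned = grouped.apply(lambda g: g - g.mean())  # result concatenated back onto dim 't'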
ops.inject_reduce_methods(DataArrayGroupBy)
ops.inject_binary_ops(DataArrayGroupBy)
class DatasetGroupBy(GroupBy, ImplementsDatasetReduce):
def apply(self, func, **kwargs):
"""Apply a function over each Dataset in the group and concatenate them
together into a new Dataset.
`func` is called like `func(ds, *args, **kwargs)` for each dataset `ds`
in this group.
Apply uses heuristics (like `pandas.GroupBy.apply`) to figure out how
to stack together the datasets. The rule is:
1. If the dimension along which the group coordinate is defined is
still in the first grouped item after applying `func`, then stack
over this dimension.
2. Otherwise, stack over the new dimension given by name of this
grouping (the argument to the `groupby` function).
Parameters
----------
func : function
Callable to apply to each sub-dataset.
**kwargs
Used to call `func(ds, **kwargs)` for each sub-dataset `ds`.
Returns
-------
applied : Dataset or DataArray
The result of splitting, applying and combining this dataset.
"""
kwargs.pop('shortcut', None) # ignore shortcut if set (for now)
applied = (func(ds, **kwargs) for ds in self._iter_grouped())
return self._combine(applied)
def _combine(self, applied):
"""Recombine the applied objects like the original."""
applied_example, applied = peek_at(applied)
coord, dim, positions = self._infer_concat_args(applied_example)
combined = concat(applied, dim)
combined = _maybe_reorder(combined, dim, positions)
if coord is not None:
combined[coord.name] = coord
combined = self._maybe_restore_empty_groups(combined)
combined = self._maybe_unstack(combined)
return combined
def reduce(self, func, dim=None, keep_attrs=False, **kwargs):
"""Reduce the items in this group by applying `func` along some
dimension(s).
Parameters
----------
func : function
Function which can be called in the form
`func(x, axis=axis, **kwargs)` to return the result of collapsing
an np.ndarray over an integer valued axis.
dim : str or sequence of str, optional
Dimension(s) over which to apply `func`.
axis : int or sequence of int, optional
Axis(es) over which to apply `func`. Only one of the 'dim'
and 'axis' arguments can be supplied. If neither are supplied, then
`func` is calculated over all dimensions for each group item.
keep_attrs : bool, optional
If True, the dataset's attributes (`attrs`) will be copied from
the original object to the new one. If False (default), the new
object will be returned without attributes.
**kwargs : dict
Additional keyword arguments passed on to `func`.
Returns
-------
reduced : Array
Array with summarized data and the indicated dimension(s)
removed.
"""
def reduce_dataset(ds):
return ds.reduce(func, dim, keep_attrs, **kwargs)
return self.apply(reduce_dataset)
def assign(self, **kwargs):
"""Assign data variables by group.
See also
--------
Dataset.assign
"""
return self.apply(lambda ds: ds.assign(**kwargs))
ops.inject_reduce_methods(DatasetGroupBy)
ops.inject_binary_ops(DatasetGroupBy)
| {
"repo_name": "jcmgray/xarray",
"path": "xarray/core/groupby.py",
"copies": "1",
"size": "25171",
"license": "apache-2.0",
"hash": 2869993255766389000,
"line_mean": 36.7376311844,
"line_max": 79,
"alpha_frac": 0.5917921418,
"autogenerated": false,
"ratio": 4.434637068357999,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 667
} |
from __future__ import absolute_import, division, print_function
import functools
import numpy as np
from .. import Variable
from ..core import indexing
from ..core.pycompat import OrderedDict, bytes_type, iteritems, unicode_type
from ..core.utils import FrozenOrderedDict, close_on_error
from .common import (
HDF5_LOCK, DataStorePickleMixin, WritableCFDataStore, find_root)
from .netCDF4_ import (
BaseNetCDF4Array, _encode_nc4_variable, _extract_nc4_variable_encoding,
_get_datatype, _nc4_require_group)
class H5NetCDFArrayWrapper(BaseNetCDF4Array):
def __getitem__(self, key):
key, np_inds = indexing.decompose_indexer(
key, self.shape, indexing.IndexingSupport.OUTER_1VECTOR)
# h5py requires using lists for fancy indexing:
# https://github.com/h5py/h5py/issues/992
key = tuple(list(k) if isinstance(k, np.ndarray) else k for k in
key.tuple)
with self.datastore.ensure_open(autoclose=True):
array = self.get_array()[key]
if len(np_inds.tuple) > 0:
array = indexing.NumpyIndexingAdapter(array)[np_inds]
return array
def maybe_decode_bytes(txt):
if isinstance(txt, bytes_type):
return txt.decode('utf-8')
else:
return txt
def _read_attributes(h5netcdf_var):
# GH451
# to ensure conventions decoding works properly on Python 3, decode all
# bytes attributes to strings
attrs = OrderedDict()
for k, v in h5netcdf_var.attrs.items():
if k not in ['_FillValue', 'missing_value']:
v = maybe_decode_bytes(v)
attrs[k] = v
return attrs
_extract_h5nc_encoding = functools.partial(
_extract_nc4_variable_encoding,
lsd_okay=False, h5py_okay=True, backend='h5netcdf')
def _h5netcdf_create_group(dataset, name):
return dataset.create_group(name)
def _open_h5netcdf_group(filename, mode, group):
import h5netcdf
ds = h5netcdf.File(filename, mode=mode)
with close_on_error(ds):
return _nc4_require_group(
ds, group, mode, create_group=_h5netcdf_create_group)
class H5NetCDFStore(WritableCFDataStore, DataStorePickleMixin):
"""Store for reading and writing data via h5netcdf
"""
def __init__(self, filename, mode='r', format=None, group=None,
writer=None, autoclose=False, lock=HDF5_LOCK):
if format not in [None, 'NETCDF4']:
raise ValueError('invalid format for h5netcdf backend')
opener = functools.partial(_open_h5netcdf_group, filename, mode=mode,
group=group)
self._ds = opener()
if autoclose:
raise NotImplementedError('autoclose=True is not implemented '
'for the h5netcdf backend pending '
'further exploration, e.g., bug fixes '
'(in h5netcdf?)')
self._autoclose = False
self._isopen = True
self.format = format
self._opener = opener
self._filename = filename
self._mode = mode
super(H5NetCDFStore, self).__init__(writer, lock=lock)
def open_store_variable(self, name, var):
with self.ensure_open(autoclose=False):
dimensions = var.dimensions
data = indexing.LazilyOuterIndexedArray(
H5NetCDFArrayWrapper(name, self))
attrs = _read_attributes(var)
# netCDF4 specific encoding
encoding = {
'chunksizes': var.chunks,
'fletcher32': var.fletcher32,
'shuffle': var.shuffle,
}
# Convert h5py-style compression options to NetCDF4-Python
# style, if possible
if var.compression == 'gzip':
encoding['zlib'] = True
encoding['complevel'] = var.compression_opts
elif var.compression is not None:
encoding['compression'] = var.compression
encoding['compression_opts'] = var.compression_opts
# save source so __repr__ can detect if it's local or not
encoding['source'] = self._filename
encoding['original_shape'] = var.shape
return Variable(dimensions, data, attrs, encoding)
def get_variables(self):
with self.ensure_open(autoclose=False):
return FrozenOrderedDict((k, self.open_store_variable(k, v))
for k, v in iteritems(self.ds.variables))
def get_attrs(self):
with self.ensure_open(autoclose=True):
return FrozenOrderedDict(_read_attributes(self.ds))
def get_dimensions(self):
with self.ensure_open(autoclose=True):
return self.ds.dimensions
def get_encoding(self):
with self.ensure_open(autoclose=True):
encoding = {}
encoding['unlimited_dims'] = {
k for k, v in self.ds.dimensions.items() if v is None}
return encoding
def set_dimension(self, name, length, is_unlimited=False):
with self.ensure_open(autoclose=False):
if is_unlimited:
self.ds.dimensions[name] = None
self.ds.resize_dimension(name, length)
else:
self.ds.dimensions[name] = length
def set_attribute(self, key, value):
with self.ensure_open(autoclose=False):
self.ds.attrs[key] = value
def encode_variable(self, variable):
return _encode_nc4_variable(variable)
def prepare_variable(self, name, variable, check_encoding=False,
unlimited_dims=None):
import h5py
attrs = variable.attrs.copy()
dtype = _get_datatype(variable)
fillvalue = attrs.pop('_FillValue', None)
if dtype is str and fillvalue is not None:
raise NotImplementedError(
'h5netcdf does not yet support setting a fill value for '
'variable-length strings '
'(https://github.com/shoyer/h5netcdf/issues/37). '
"Either remove '_FillValue' from encoding on variable %r "
"or set {'dtype': 'S1'} in encoding to use the fixed width "
'NC_CHAR type.' % name)
if dtype is str:
dtype = h5py.special_dtype(vlen=unicode_type)
encoding = _extract_h5nc_encoding(variable,
raise_on_invalid=check_encoding)
kwargs = {}
# Convert from NetCDF4-Python style compression settings to h5py style
# If both styles are used together, h5py takes precedence
# If set_encoding=True, raise ValueError in case of mismatch
if encoding.pop('zlib', False):
if (check_encoding and encoding.get('compression')
not in (None, 'gzip')):
raise ValueError("'zlib' and 'compression' encodings mismatch")
encoding.setdefault('compression', 'gzip')
if (check_encoding and encoding.get('complevel') not in
(None, encoding.get('compression_opts'))):
raise ValueError("'complevel' and 'compression_opts' encodings "
"mismatch")
complevel = encoding.pop('complevel', 0)
if complevel != 0:
encoding.setdefault('compression_opts', complevel)
encoding['chunks'] = encoding.pop('chunksizes', None)
for key in ['compression', 'compression_opts', 'shuffle',
'chunks', 'fletcher32']:
if key in encoding:
kwargs[key] = encoding[key]
if name not in self.ds:
nc4_var = self.ds.create_variable(
name, dtype=dtype, dimensions=variable.dims,
fillvalue=fillvalue, **kwargs)
else:
nc4_var = self.ds[name]
for k, v in iteritems(attrs):
nc4_var.attrs[k] = v
target = H5NetCDFArrayWrapper(name, self)
return target, variable.data
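# A standalone sketch of the encoding translation performed in prepare_variable
# above; the helper name is hypothetical and the check_encoding validation
# branches are omitted.
def _translate_encoding_sketch(encoding):
    # Map NetCDF4-Python style keys onto the h5py-style keywords that
    # h5netcdf's create_variable ultimately receives.
    enc = dict(encoding)
    if enc.pop('zlib', False):
        enc.setdefault('compression', 'gzip')
    complevel = enc.pop('complevel', 0)
    if complevel != 0:
        enc.setdefault('compression_opts', complevel)
    enc['chunks'] = enc.pop('chunksizes', None)
    return enc

_translate_encoding_sketch({'zlib': True, 'complevel': 4, 'chunksizes': (10, 10)})
# -> {'compression': 'gzip', 'compression_opts': 4, 'chunks': (10, 10)}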
def sync(self, compute=True):
if not compute:
raise NotImplementedError(
'compute=False is not supported for the h5netcdf backend yet')
with self.ensure_open(autoclose=True):
super(H5NetCDFStore, self).sync(compute=compute)
self.ds.sync()
def close(self):
if self._isopen:
# netCDF4 only allows closing the root group
ds = find_root(self.ds)
if not ds._closed:
ds.close()
self._isopen = False
| {
"repo_name": "jcmgray/xarray",
"path": "xarray/backends/h5netcdf_.py",
"copies": "1",
"size": "8611",
"license": "apache-2.0",
"hash": -2122231074646926000,
"line_mean": 35.7991452991,
"line_max": 79,
"alpha_frac": 0.589710835,
"autogenerated": false,
"ratio": 4.079109426811938,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 234
} |
from __future__ import absolute_import, division, print_function
import functools
import numpy as np
from .. import Variable
from ..core import indexing
from ..core.utils import Frozen, FrozenOrderedDict
from .common import AbstractDataStore, BackendArray, DataStorePickleMixin
class NioArrayWrapper(BackendArray):
def __init__(self, variable_name, datastore):
self.datastore = datastore
self.variable_name = variable_name
array = self.get_array()
self.shape = array.shape
self.dtype = np.dtype(array.typecode())
def get_array(self):
self.datastore.assert_open()
return self.datastore.ds.variables[self.variable_name]
def __getitem__(self, key):
key, np_inds = indexing.decompose_indexer(
key, self.shape, indexing.IndexingSupport.BASIC)
with self.datastore.ensure_open(autoclose=True):
array = self.get_array()
if key.tuple == () and self.ndim == 0:
return array.get_value()
array = array[key.tuple]
if len(np_inds.tuple) > 0:
array = indexing.NumpyIndexingAdapter(array)[np_inds]
return array
class NioDataStore(AbstractDataStore, DataStorePickleMixin):
"""Store for accessing datasets via PyNIO
"""
def __init__(self, filename, mode='r', autoclose=False):
import Nio
opener = functools.partial(Nio.open_file, filename, mode=mode)
self._ds = opener()
self._autoclose = autoclose
self._isopen = True
self._opener = opener
self._mode = mode
# xarray provides its own support for FillValue,
# so turn off PyNIO's support for the same.
self.ds.set_option('MaskedArrayMode', 'MaskedNever')
def open_store_variable(self, name, var):
data = indexing.LazilyOuterIndexedArray(NioArrayWrapper(name, self))
return Variable(var.dimensions, data, var.attributes)
def get_variables(self):
with self.ensure_open(autoclose=False):
return FrozenOrderedDict((k, self.open_store_variable(k, v))
for k, v in self.ds.variables.items())
def get_attrs(self):
with self.ensure_open(autoclose=True):
return Frozen(self.ds.attributes)
def get_dimensions(self):
with self.ensure_open(autoclose=True):
return Frozen(self.ds.dimensions)
def get_encoding(self):
encoding = {}
encoding['unlimited_dims'] = set(
[k for k in self.ds.dimensions if self.ds.unlimited(k)])
return encoding
def close(self):
if self._isopen:
self.ds.close()
self._isopen = False
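# Illustrative sketch only: one hypothetical way to read a file through the
# PyNIO store defined above. The file name is an assumption, and PyNIO must be
# installed for this to run.
def _example_read_with_pynio(path='data.grb'):
    store = NioDataStore(path, mode='r')
    try:
        variables = store.get_variables()   # FrozenOrderedDict of Variables
        attrs = store.get_attrs()           # global file attributes
        dims = store.get_dimensions()
        return variables, attrs, dims
    finally:
        store.close()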
| {
"repo_name": "jcmgray/xarray",
"path": "xarray/backends/pynio_.py",
"copies": "1",
"size": "2731",
"license": "apache-2.0",
"hash": -7227445321616380000,
"line_mean": 31.5119047619,
"line_max": 76,
"alpha_frac": 0.6246796045,
"autogenerated": false,
"ratio": 3.935158501440922,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 84
} |
from __future__ import absolute_import, division, print_function
import functools
import six
from rest_framework.response import Response
from sentry import analytics, eventstore, search
from sentry.api.base import DocSection, EnvironmentMixin
from sentry.api.bases.project import ProjectEndpoint, ProjectEventPermission
from sentry.api.helpers.group_index import (
build_query_params_from_request,
delete_groups,
get_by_short_id,
update_groups,
ValidationError,
)
from sentry.api.serializers import serialize
from sentry.api.serializers.models.group import StreamGroupSerializer
from sentry.models import Environment, Group, GroupStatus
from sentry.models.event import Event
from sentry.models.savedsearch import DEFAULT_SAVED_SEARCH_QUERIES
from sentry.signals import advanced_search
from sentry.utils.apidocs import attach_scenarios, scenario
from sentry.utils.cursors import CursorResult
from sentry.utils.validators import normalize_event_id
ERR_INVALID_STATS_PERIOD = "Invalid stats_period. Valid choices are '', '24h', and '14d'"
@scenario("BulkUpdateIssues")
def bulk_update_issues_scenario(runner):
project = runner.default_project
group1, group2 = Group.objects.filter(project=project)[:2]
runner.request(
method="PUT",
path="/projects/%s/%s/issues/?id=%s&id=%s"
% (runner.org.slug, project.slug, group1.id, group2.id),
data={"status": "unresolved", "isPublic": False},
)
@scenario("BulkRemoveIssuess")
def bulk_remove_issues_scenario(runner):
with runner.isolated_project("Amazing Plumbing") as project:
group1, group2 = Group.objects.filter(project=project)[:2]
runner.request(
method="DELETE",
path="/projects/%s/%s/issues/?id=%s&id=%s"
% (runner.org.slug, project.slug, group1.id, group2.id),
)
@scenario("ListProjectIssuess")
def list_project_issues_scenario(runner):
project = runner.default_project
runner.request(
method="GET",
path="/projects/%s/%s/issues/?statsPeriod=24h" % (runner.org.slug, project.slug),
)
class ProjectGroupIndexEndpoint(ProjectEndpoint, EnvironmentMixin):
doc_section = DocSection.EVENTS
permission_classes = (ProjectEventPermission,)
def _search(self, request, project, extra_query_kwargs=None):
try:
environment = self._get_environment_from_request(request, project.organization_id)
except Environment.DoesNotExist:
# XXX: The 1000 magic number for `max_hits` is an abstraction leak
# from `sentry.api.paginator.BasePaginator.get_result`.
result = CursorResult([], None, None, hits=0, max_hits=1000)
query_kwargs = {}
else:
environments = [environment] if environment is not None else environment
query_kwargs = build_query_params_from_request(
request, project.organization, [project], environments
)
if extra_query_kwargs is not None:
assert "environment" not in extra_query_kwargs
query_kwargs.update(extra_query_kwargs)
query_kwargs["environments"] = environments
result = search.query(**query_kwargs)
return result, query_kwargs
# statsPeriod=24h
@attach_scenarios([list_project_issues_scenario])
def get(self, request, project):
"""
List a Project's Issues
```````````````````````
Return a list of issues (groups) bound to a project. All parameters are
supplied as query string parameters.
        A default query of ``is:unresolved`` is applied. To return results
        with other statuses send a new query value (i.e. ``?query=`` for all
        results).
The ``statsPeriod`` parameter can be used to select the timeline
stats which should be present. Possible values are: '' (disable),
'24h', '14d'
:qparam string statsPeriod: an optional stat period (can be one of
``"24h"``, ``"14d"``, and ``""``).
        :qparam bool shortIdLookup: if this is set to true then short IDs are
                                    looked up by this function as well. This
                                    can cause the function to return an issue
                                    from a different project, which is why it
                                    is opt-in. Set to `1` to enable.
        :qparam querystring query: an optional Sentry structured search
                                   query. If not provided, an implied
                                   ``"is:unresolved"`` is assumed.
:pparam string organization_slug: the slug of the organization the
issues belong to.
:pparam string project_slug: the slug of the project the issues
belong to.
:auth: required
"""
stats_period = request.GET.get("statsPeriod")
if stats_period not in (None, "", "24h", "14d"):
return Response({"detail": ERR_INVALID_STATS_PERIOD}, status=400)
elif stats_period is None:
# default
stats_period = "24h"
elif stats_period == "":
# disable stats
stats_period = None
serializer = functools.partial(
StreamGroupSerializer,
environment_func=self._get_environment_func(request, project.organization_id),
stats_period=stats_period,
)
query = request.GET.get("query", "").strip()
if query:
matching_group = None
matching_event = None
event_id = normalize_event_id(query)
if event_id:
# check to see if we've got an event ID
try:
matching_group = Group.objects.from_event_id(project, event_id)
except Group.DoesNotExist:
pass
else:
matching_event = eventstore.get_event_by_id(project.id, event_id)
if matching_event is not None:
Event.objects.bind_nodes([matching_event], "data")
elif matching_group is None:
matching_group = get_by_short_id(
project.organization_id, request.GET.get("shortIdLookup"), query
)
if matching_group is not None and matching_group.project_id != project.id:
matching_group = None
if matching_group is not None:
matching_event_environment = None
try:
matching_event_environment = (
matching_event.get_environment().name if matching_event else None
)
except Environment.DoesNotExist:
pass
response = Response(
serialize(
[matching_group],
request.user,
serializer(
matching_event_id=getattr(matching_event, "id", None),
matching_event_environment=matching_event_environment,
),
)
)
response["X-Sentry-Direct-Hit"] = "1"
return response
try:
cursor_result, query_kwargs = self._search(request, project, {"count_hits": True})
except ValidationError as exc:
return Response({"detail": six.text_type(exc)}, status=400)
results = list(cursor_result)
context = serialize(results, request.user, serializer())
# HACK: remove auto resolved entries
# TODO: We should try to integrate this into the search backend, since
# this can cause us to arbitrarily return fewer results than requested.
status = [
search_filter
for search_filter in query_kwargs.get("search_filters", [])
if search_filter.key.name == "status"
]
if status and status[0].value.raw_value == GroupStatus.UNRESOLVED:
context = [r for r in context if r["status"] == "unresolved"]
response = Response(context)
self.add_cursor_headers(request, response, cursor_result)
if results and query not in DEFAULT_SAVED_SEARCH_QUERIES:
advanced_search.send(project=project, sender=request.user)
analytics.record(
"project_issue.searched",
user_id=request.user.id,
organization_id=project.organization_id,
project_id=project.id,
query=query,
)
return response
@attach_scenarios([bulk_update_issues_scenario])
def put(self, request, project):
"""
Bulk Mutate a List of Issues
````````````````````````````
Bulk mutate various attributes on issues. The list of issues
to modify is given through the `id` query parameter. It is repeated
for each issue that should be modified.
- For non-status updates, the `id` query parameter is required.
- For status updates, the `id` query parameter may be omitted
for a batch "update all" query.
- An optional `status` query parameter may be used to restrict
mutations to only events with the given status.
The following attributes can be modified and are supplied as
JSON object in the body:
If any ids are out of scope this operation will succeed without
any data mutation.
:qparam int id: a list of IDs of the issues to be mutated. This
parameter shall be repeated for each issue. It
is optional only if a status is mutated in which
case an implicit `update all` is assumed.
:qparam string status: optionally limits the query to issues of the
specified status. Valid values are
``"resolved"``, ``"unresolved"`` and
``"ignored"``.
:pparam string organization_slug: the slug of the organization the
issues belong to.
:pparam string project_slug: the slug of the project the issues
belong to.
:param string status: the new status for the issues. Valid values
are ``"resolved"``, ``"resolvedInNextRelease"``,
``"unresolved"``, and ``"ignored"``.
:param map statusDetails: additional details about the resolution.
Valid values are ``"inRelease"``, ``"inNextRelease"``,
``"inCommit"``, ``"ignoreDuration"``, ``"ignoreCount"``,
``"ignoreWindow"``, ``"ignoreUserCount"``, and
``"ignoreUserWindow"``.
:param int ignoreDuration: the number of minutes to ignore this issue.
:param boolean isPublic: sets the issue to public or private.
        :param boolean merge: allows merging or unmerging different issues.
:param string assignedTo: the actor id (or username) of the user or team that should be
assigned to this issue.
        :param boolean hasSeen: if this API call is invoked with a user
                                context, this allows changing the flag that
                                indicates whether the user has seen the
                                event.
        :param boolean isBookmarked: if this API call is invoked with a user
                                     context, this allows changing the
                                     bookmark flag.
:auth: required
"""
search_fn = functools.partial(self._search, request, project)
return update_groups(request, [project], project.organization_id, search_fn)
def delete(self, request, project):
"""
Bulk Remove a List of Issues
````````````````````````````
Permanently remove the given issues. The list of issues to
modify is given through the `id` query parameter. It is repeated
for each issue that should be removed.
Only queries by 'id' are accepted.
If any ids are out of scope this operation will succeed without
any data mutation.
:qparam int id: a list of IDs of the issues to be removed. This
parameter shall be repeated for each issue.
:pparam string organization_slug: the slug of the organization the
issues belong to.
:pparam string project_slug: the slug of the project the issues
belong to.
:auth: required
"""
search_fn = functools.partial(self._search, request, project)
return delete_groups(request, [project], project.organization_id, search_fn)
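# Illustrative sketch only (not part of sentry): how a client might exercise
# the endpoints documented above. The ``requests`` dependency, the base URL,
# the auth token and the issue ids are assumptions for the example.
def _example_issue_requests(org='my-org', project='my-project', token='...'):
    import requests
    base = 'https://sentry.example.com/api/0/projects/%s/%s/issues/' % (org, project)
    headers = {'Authorization': 'Bearer %s' % token}
    # List unresolved issues with 24h stats (handled by ``get`` above).
    listed = requests.get(base, params={'statsPeriod': '24h'}, headers=headers)
    # Bulk-resolve two issues selected by id (handled by ``put`` above).
    updated = requests.put(base, params=[('id', 1), ('id', 2)],
                           json={'status': 'resolved'}, headers=headers)
    # Bulk-remove the same issues (handled by ``delete`` above).
    removed = requests.delete(base, params=[('id', 1), ('id', 2)], headers=headers)
    return listed, updated, removed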
| {
"repo_name": "mvaled/sentry",
"path": "src/sentry/api/endpoints/project_group_index.py",
"copies": "1",
"size": "13242",
"license": "bsd-3-clause",
"hash": -455965934252018700,
"line_mean": 42.5592105263,
"line_max": 95,
"alpha_frac": 0.5752152243,
"autogenerated": false,
"ratio": 4.8013052936910805,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5876520517991081,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import gc
import io
import locale # system locale module, not tornado.locale
import logging
import operator
import textwrap
import sys
from tornado.httpclient import AsyncHTTPClient
from tornado.httpserver import HTTPServer
from tornado.ioloop import IOLoop
from tornado.netutil import Resolver
from tornado.options import define, options, add_parse_callback
from tornado.test.util import unittest
try:
reduce # py2
except NameError:
from functools import reduce # py3
TEST_MODULES = [
'tornado.httputil.doctests',
'tornado.iostream.doctests',
'tornado.util.doctests',
'tornado.test.asyncio_test',
'tornado.test.auth_test',
'tornado.test.autoreload_test',
'tornado.test.concurrent_test',
'tornado.test.curl_httpclient_test',
'tornado.test.escape_test',
'tornado.test.gen_test',
'tornado.test.http1connection_test',
'tornado.test.httpclient_test',
'tornado.test.httpserver_test',
'tornado.test.httputil_test',
'tornado.test.import_test',
'tornado.test.ioloop_test',
'tornado.test.iostream_test',
'tornado.test.locale_test',
'tornado.test.locks_test',
'tornado.test.netutil_test',
'tornado.test.log_test',
'tornado.test.options_test',
'tornado.test.process_test',
'tornado.test.queues_test',
'tornado.test.routing_test',
'tornado.test.simple_httpclient_test',
'tornado.test.stack_context_test',
'tornado.test.tcpclient_test',
'tornado.test.tcpserver_test',
'tornado.test.template_test',
'tornado.test.testing_test',
'tornado.test.twisted_test',
'tornado.test.util_test',
'tornado.test.web_test',
'tornado.test.websocket_test',
'tornado.test.windows_test',
'tornado.test.wsgi_test',
]
def all():
return unittest.defaultTestLoader.loadTestsFromNames(TEST_MODULES)
def test_runner_factory(stderr):
class TornadoTextTestRunner(unittest.TextTestRunner):
def __init__(self, *args, **kwargs):
super(TornadoTextTestRunner, self).__init__(*args, stream=stderr, **kwargs)
def run(self, test):
result = super(TornadoTextTestRunner, self).run(test)
if result.skipped:
skip_reasons = set(reason for (test, reason) in result.skipped)
self.stream.write(textwrap.fill(
"Some tests were skipped because: %s" %
", ".join(sorted(skip_reasons))))
self.stream.write("\n")
return result
return TornadoTextTestRunner
class LogCounter(logging.Filter):
"""Counts the number of WARNING or higher log records."""
def __init__(self, *args, **kwargs):
# Can't use super() because logging.Filter is an old-style class in py26
logging.Filter.__init__(self, *args, **kwargs)
self.info_count = self.warning_count = self.error_count = 0
def filter(self, record):
if record.levelno >= logging.ERROR:
self.error_count += 1
elif record.levelno >= logging.WARNING:
self.warning_count += 1
elif record.levelno >= logging.INFO:
self.info_count += 1
return True
class CountingStderr(io.IOBase):
def __init__(self, real):
self.real = real
self.byte_count = 0
def write(self, data):
self.byte_count += len(data)
return self.real.write(data)
def flush(self):
return self.real.flush()
def main():
# The -W command-line option does not work in a virtualenv with
# python 3 (as of virtualenv 1.7), so configure warnings
# programmatically instead.
import warnings
# Be strict about most warnings. This also turns on warnings that are
# ignored by default, including DeprecationWarnings and
# python 3.2's ResourceWarnings.
warnings.filterwarnings("error")
# setuptools sometimes gives ImportWarnings about things that are on
# sys.path even if they're not being used.
warnings.filterwarnings("ignore", category=ImportWarning)
# Tornado generally shouldn't use anything deprecated, but some of
# our dependencies do (last match wins).
warnings.filterwarnings("ignore", category=DeprecationWarning)
warnings.filterwarnings("error", category=DeprecationWarning,
module=r"tornado\..*")
warnings.filterwarnings("ignore", category=PendingDeprecationWarning)
warnings.filterwarnings("error", category=PendingDeprecationWarning,
module=r"tornado\..*")
# The unittest module is aggressive about deprecating redundant methods,
# leaving some without non-deprecated spellings that work on both
# 2.7 and 3.2
warnings.filterwarnings("ignore", category=DeprecationWarning,
message="Please use assert.* instead")
warnings.filterwarnings("ignore", category=PendingDeprecationWarning,
message="Please use assert.* instead")
# Twisted 15.0.0 triggers some warnings on py3 with -bb.
warnings.filterwarnings("ignore", category=BytesWarning,
module=r"twisted\..*")
if (3,) < sys.version_info < (3, 6):
# Prior to 3.6, async ResourceWarnings were rather noisy
# and even
# `python3.4 -W error -c 'import asyncio; asyncio.get_event_loop()'`
# would generate a warning.
warnings.filterwarnings("ignore", category=ResourceWarning, # noqa: F821
module=r"asyncio\..*")
logging.getLogger("tornado.access").setLevel(logging.CRITICAL)
define('httpclient', type=str, default=None,
callback=lambda s: AsyncHTTPClient.configure(
s, defaults=dict(allow_ipv6=False)))
define('httpserver', type=str, default=None,
callback=HTTPServer.configure)
define('ioloop', type=str, default=None)
define('ioloop_time_monotonic', default=False)
define('resolver', type=str, default=None,
callback=Resolver.configure)
define('debug_gc', type=str, multiple=True,
help="A comma-separated list of gc module debug constants, "
"e.g. DEBUG_STATS or DEBUG_COLLECTABLE,DEBUG_OBJECTS",
callback=lambda values: gc.set_debug(
reduce(operator.or_, (getattr(gc, v) for v in values))))
define('locale', type=str, default=None,
callback=lambda x: locale.setlocale(locale.LC_ALL, x))
options.disable_hh_patches = True
def configure_ioloop():
kwargs = {}
if options.ioloop_time_monotonic:
from tornado.platform.auto import monotonic_time
if monotonic_time is None:
raise RuntimeError("monotonic clock not found")
kwargs['time_func'] = monotonic_time
if options.ioloop or kwargs:
IOLoop.configure(options.ioloop, **kwargs)
add_parse_callback(configure_ioloop)
log_counter = LogCounter()
add_parse_callback(
lambda: logging.getLogger().handlers[0].addFilter(log_counter))
# Certain errors (especially "unclosed resource" errors raised in
# destructors) go directly to stderr instead of logging. Count
# anything written by anything but the test runner as an error.
orig_stderr = sys.stderr
sys.stderr = CountingStderr(orig_stderr)
import tornado.testing
kwargs = {}
if sys.version_info >= (3, 2):
# HACK: unittest.main will make its own changes to the warning
# configuration, which may conflict with the settings above
# or command-line flags like -bb. Passing warnings=False
# suppresses this behavior, although this looks like an implementation
# detail. http://bugs.python.org/issue15626
kwargs['warnings'] = False
kwargs['testRunner'] = test_runner_factory(orig_stderr)
try:
tornado.testing.main(**kwargs)
finally:
# The tests should run clean; consider it a failure if they
# logged anything at info level or above.
if (log_counter.info_count > 0 or
log_counter.warning_count > 0 or
log_counter.error_count > 0 or
sys.stderr.byte_count > 0):
logging.error("logged %d infos, %d warnings, %d errors, and %d bytes to stderr",
log_counter.info_count, log_counter.warning_count,
log_counter.error_count, sys.stderr.byte_count)
sys.exit(1)
if __name__ == '__main__':
main()
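# Illustrative invocations (assumed, based on the define() calls above);
# tornado.options exposes each define() as a ``--name=value`` flag:
#
#   python -m tornado.test.runtests
#   python -m tornado.test.runtests --ioloop=tornado.platform.asyncio.AsyncIOLoop
#   python -m tornado.test.runtests --debug_gc=DEBUG_STATS,DEBUG_COLLECTABLE
#   python -m tornado.test.runtests --locale=C tornado.test.web_test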
| {
"repo_name": "hhru/tornado",
"path": "tornado/test/runtests.py",
"copies": "1",
"size": "8554",
"license": "apache-2.0",
"hash": -1016291712964334000,
"line_mean": 37.7058823529,
"line_max": 92,
"alpha_frac": 0.6502221183,
"autogenerated": false,
"ratio": 4.090865614538498,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0001559366676381224,
"num_lines": 221
} |
from __future__ import absolute_import, division, print_function
import getpass
import logging
import os
import socket
from threading import Thread
from time import sleep
from mentos.subscription import Event, Subscription
from mentos.utils import encode_data, run_background
from toolz import merge
from tornado.ioloop import IOLoop
log = logging.getLogger(__name__)
class SchedulerDriver(object):
def __init__(self, scheduler, name, user=getpass.getuser(),
master=os.getenv('MESOS_MASTER', 'zk://localhost:2181'),
failover_timeout=100, capabilities=None, principal=None, secret=None,
implicit_acknowledgements=True, handlers={}, loop=None):
self.loop = loop or IOLoop()
self.master = master
self.leading_master_seq = None
self.leading_master_info = None
self.scheduler = scheduler
self.framework = {
'user': user,
'name': name,
'capabilities': capabilities or [],
'failover_timeout': failover_timeout,
'hostname': socket.gethostname()
}
self.implicit_acknowledgements = implicit_acknowledgements
defaults = {Event.SUBSCRIBED: self.on_subscribed,
Event.OFFERS: self.on_offers,
Event.RESCIND: self.on_rescind,
Event.UPDATE: self.on_update,
Event.MESSAGE: self.on_message,
Event.RESCIND_INVERSE_OFFER: self.on_rescind_inverse,
Event.FAILURE: self.on_failure,
Event.ERROR: self.on_error,
Event.HEARTBEAT: self.on_heartbeat,
Event.OUTBOUND_SUCCESS: self.on_outbound_success,
Event.OUTBOUND_ERROR: self.on_outbound_error}
self.handlers = merge(defaults, handlers)
self.subscription = Subscription(self.framework, self.master,
'/api/v1/scheduler', self.handlers,
principal=principal,
secret=secret,
timeout=failover_timeout,
loop=self.loop)
def start(self, block=False, **kwargs):
''' Start scheduler running in separate thread '''
log.debug('Starting scheduler')
# if hasattr(self, '_loop_thread'):
# if not self._loop_thread._is_stopped:
# return
if not self.loop._running:
self._loop_thread = Thread(target=self.loop.start)
self._loop_thread.daemon = True
self._loop_thread.start()
while not self.loop._running: # pragma: no cover
sleep(0.001)
self.loop.add_callback(self.subscription.start)
if block: # pragma: no cover
self._loop_thread.join()
def stop(self):
log.debug('Terminating Scheduler Driver')
if self.subscription:
self.subscription.close()
if self.loop:
def on_complete(self):
log.debug('Closed scheduler')
run_background(self.loop.stop, on_complete)
# self.loop.add_callback(sp)
while self.loop._running:
sleep(0.1)
def request(self, requests):
payload = {
'type': 'REQUEST',
'requests': requests
}
self.loop.add_callback(self.subscription.send, payload)
log.debug('Request resources from Mesos')
def kill(self, task_id, agent_id):
payload = {
'type': 'KILL',
'kill': {
'task_id': {
'value': task_id
},
'agent_id': {
'value': agent_id
}
}
}
self.loop.add_callback(self.subscription.send, payload)
log.debug('Kills task {}'.format(task_id))
def reconcile(self, task_id, agent_id):
if task_id and agent_id:
payload = {
'type': 'RECONCILE',
'reconcile': {
'tasks': [{
'task_id': {
'value': task_id
},
'agent_id': {
'value': agent_id
}
}]
}}
log.warn('Reconciling task ID: ' + task_id)
else:
payload = {
'type': 'RECONCILE',
'reconcile': {'tasks': []}
}
log.debug('Reconciling all tasks')
if payload:
self.loop.add_callback(self.subscription.send, payload)
else: # pragma: no cover
log.debug('Agent and Task not set')
def decline(self, offer_ids, filters=None):
if isinstance(offer_ids, dict):
offer_ids = [offer_ids]
decline = {
'offer_ids': offer_ids
}
if filters is not None:
decline['filters'] = filters
payload = {
'type': 'DECLINE',
'decline': decline
}
self.loop.add_callback(self.subscription.send, payload)
log.debug('Declines offer {}'.format(offer_ids))
def launch(self, offer_ids, tasks, filters=None):
if not tasks:
return self.decline(offer_ids, filters=filters)
operations = [{
'type': 'LAUNCH',
'launch': {
'task_infos': tasks
}
}]
self.accept(offer_ids, operations, filters=filters)
log.debug('Launching operations {} with filters {}'.format(
operations, filters))
def accept(self, offer_ids, operations, filters=None):
if not operations:
self.decline(offer_ids, filters=filters)
else:
accept = {
'offer_ids': offer_ids,
'operations': operations
}
if filters is not None:
accept['filters'] = filters
payload = {
'type': 'ACCEPT',
'accept': accept
}
self.loop.add_callback(self.subscription.send, payload)
log.debug('Accepts offers {}'.format(offer_ids))
def revive(self):
payload = {
'type': 'REVIVE'
}
self.loop.add_callback(self.subscription.send, payload)
log.debug('Revives; removes all filters previously set by framework')
def acknowledge(self, status):
if 'uuid' not in status:
log.warn('Did not get a UUID for %s' % status)
return
payload = {
'type': 'ACKNOWLEDGE',
'acknowledge': {
'agent_id': status['agent_id'],
'task_id': status['task_id'],
'uuid': status['uuid']
}
}
self.loop.add_callback(self.subscription.send, payload)
log.debug('Acknowledges status update {}'.format(status))
def message(self, executor_id, agent_id, message):
payload = {
'type': 'MESSAGE',
'message': {
'agent_id': {
'value': agent_id
},
'executor_id': {
'value': executor_id
},
'data': encode_data(message)
}
}
self.loop.add_callback(self.subscription.send, payload)
log.debug('Sends message `{}` to executor `{}` on agent `{}`'.format(
message, executor_id, agent_id))
    def shutdown(self, agent_id, executor_id):
        payload = {
            'type': 'SHUTDOWN',
            # The v1 scheduler API nests the SHUTDOWN payload under 'shutdown'
            'shutdown': {
                'executor_id': {
                    'value': executor_id
                },
                'agent_id': {
                    'value': agent_id
                }
            }
        }
self.loop.add_callback(self.subscription.send, payload)
log.debug('Sent shutdown signal')
def teardown(self, framework_id):
payload = {
'type': 'TEARDOWN'
}
self.loop.add_callback(self.subscription.send, payload)
log.debug('Sent teardown signal')
def on_error(self, event):
message = event['message']
self.scheduler.on_error(self, message)
log.debug('Got error %s' % event)
def on_heartbeat(self, event):
self.scheduler.on_heartbeat(self, event)
log.debug('Got Heartbeat')
def on_subscribed(self, info):
self.scheduler.on_reregistered(
self, info['framework_id'], self.subscription.master_info.info)
log.debug('Subscribed %s' % info)
def on_offers(self, event):
offers = event['offers']
self.scheduler.on_offers(
self, offers
)
log.debug('Got offers %s' % event)
def on_rescind_inverse(self, event):
offer_id = event['offer_id']
self.scheduler.on_rescind_inverse(self, offer_id)
log.debug('Inverse rescind offer %s' % event)
def on_rescind(self, event):
offer_id = event['offer_id']
self.scheduler.on_rescinded(self, offer_id)
log.debug('Rescind offer %s' % event)
def on_update(self, event):
status = event['status']
self.scheduler.on_update(self, status)
if self.implicit_acknowledgements:
self.acknowledge(status)
log.debug('Got update %s' % event)
def on_message(self, event):
executor_id = event['executor_id']
agent_id = event['agent_id']
data = event['data']
self.scheduler.on_message(
self, executor_id, agent_id, data
)
log.debug('Got message %s' % event)
def on_failure(self, event):
agent_id = event['agent_id']
if 'executor_id' not in event:
self.scheduler.on_agent_lost(self, agent_id)
log.debug('Lost agent %s' % agent_id)
else:
executor_id = event['executor_id']
status = event['status']
self.scheduler.on_executor_lost(
self, executor_id,
agent_id, status
)
log.debug('Lost executor %s on agent %s' % (executor_id, agent_id))
def on_outbound_success(self, event):
self.scheduler.on_outbound_success(self, event['request'])
log.debug('Got success on outbound %s' % event)
def on_outbound_error(self, event):
self.scheduler.on_outbound_error(self, event['request'], event[
'endpoint'], event['error'])
log.debug('Got error on outbound %s' % event)
def __str__(self):
return '<%s: scheduler=%s:%s:%s>' % (
self.__class__.__name__, self.master,
self.subscription.master_info.info, self.framework)
__repr__ = __str__
def __enter__(self):
if not self.loop._running:
log.debug('Entering context manager')
self.start(block=False)
return self
def __exit__(self, type, value, traceback):
log.debug('Exited context manager')
self.stop()
def __del__(self): # pragma: no cover
log.debug('Deleting scheduler')
self.stop()
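# Illustrative sketch only: a minimal scheduler object wired into the driver
# above. The callback names match those dispatched from the ``defaults``
# mapping in ``__init__``; the master address and the decline-everything
# policy are assumptions for the example.
class _ExampleScheduler(object):
    def on_reregistered(self, driver, framework_id, master_info):
        log.debug('subscribed as %s', framework_id)

    def on_offers(self, driver, offers):
        # Decline everything; a real scheduler would build task_infos and
        # call driver.launch(offer_ids, tasks) instead.
        driver.decline([offer['id'] for offer in offers])

    def on_update(self, driver, status):
        log.debug('status update: %s', status)

    def on_heartbeat(self, driver, event):
        pass


def _example_run_driver():
    driver = SchedulerDriver(_ExampleScheduler(), name='example-framework',
                             master='zk://localhost:2181')
    with driver:                       # starts the IOLoop thread and subscription
        driver.reconcile(None, None)   # ask Mesos for the status of all tasks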
| {
"repo_name": "Arttii/malefico",
"path": "mentos/scheduler.py",
"copies": "2",
"size": "11355",
"license": "apache-2.0",
"hash": -4477812888790744000,
"line_mean": 31.8179190751,
"line_max": 86,
"alpha_frac": 0.515896081,
"autogenerated": false,
"ratio": 4.229050279329609,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5744946360329609,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import gym
class EnvWrapper:
"""Wrapper around OpenAI Gym environments allowing
for state and reward preprocessing."""
def __init__(self, name, state_filter=None, reward_filter=None):
self.name = name
self.env = gym.make(name)
self.state_filter = state_filter
self.reward_filter = reward_filter
self.action_space = self.env.action_space
self.observation_space = self.env.observation_space
self.spec = self.env.spec
def step(self, action):
s, reward, done, info = self.env.step(action)
s = self.state_filter(s) if self.state_filter else s
reward = self.reward_filter(reward) if self.reward_filter else reward
return s, reward, done, info
def reset(self):
s = self.env.reset()
return self.state_filter(s) if self.state_filter else s
def render(self, *args, **kwargs):
self.env.render(*args, **kwargs)
def seed(self, seed=None):
return self.env.seed(seed)
def close(self):
self.env.close()
@property
def monitor(self):
return self.env.monitor
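# Illustrative sketch only: wrapping a Gym environment with simple state and
# reward filters. The environment id and the filter functions are assumptions.
def _example_rollout(steps=100):
    env = EnvWrapper('CartPole-v0',
                     state_filter=lambda s: s.astype('float32'),
                     reward_filter=lambda r: r / 100.0)
    s = env.reset()
    total = 0.0
    for _ in range(steps):
        s, r, done, info = env.step(env.action_space.sample())
        total += r
        if done:
            s = env.reset()
    env.close()
    return total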
| {
"repo_name": "domluna/deep_rl",
"path": "deep_rl/envs/env_wrapper.py",
"copies": "1",
"size": "1191",
"license": "mit",
"hash": 1467084896663312100,
"line_mean": 28.775,
"line_max": 77,
"alpha_frac": 0.6364399664,
"autogenerated": false,
"ratio": 3.7452830188679247,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9881722985267924,
"avg_score": 0,
"num_lines": 40
} |
from __future__ import absolute_import, division, print_function
import gzip
from datashape import discover, dshape
from collections import Iterator
from toolz import partial, concat
import uuid
import os
from ..compatibility import unicode
from ..chunks import chunks
from ..drop import drop
from ..temp import Temp
from ..append import append
from ..convert import convert
from ..resource import resource
class TextFile(object):
canonical_extension = 'txt'
def __init__(self, path, **kwargs):
self.path = path
@property
def open(self):
if self.path.split(os.path.extsep)[-1] == 'gz':
return gzip.open
else:
return open
@convert.register(Iterator, (TextFile, Temp(TextFile)), cost=0.1)
def textfile_to_iterator(data, **kwargs):
with data.open(data.path) as f:
for line in f:
yield line
@convert.register(Iterator, chunks(TextFile), cost=0.1)
def chunks_textfile_to_iterator(data, **kwargs):
return concat(map(partial(convert, Iterator), data))
@discover.register((TextFile, Temp(TextFile)))
def discover_textfile(data, **kwargs):
return dshape('var * string')
@append.register((Temp(TextFile), TextFile), Iterator)
def append_iterator_to_textfile(target, source, **kwargs):
with target.open(target.path, 'a') as f:
for item in source:
f.write(unicode(item))
f.write('\n') # TODO: detect OS-level newline character
return target
@append.register(TextFile, object)
def append_anything_to_textfile(target, source, **kwargs):
return append(target, convert(Iterator, source, **kwargs), **kwargs)
@convert.register(Temp(TextFile), Iterator)
def iterator_to_temp_textfile(seq, **kwargs):
fn = str(uuid.uuid1())
txt = Temp(TextFile)(fn)
return append(txt, seq, **kwargs)
@resource.register(r'.+\.(txt|log)(\.gz)?')
def resource_textfile(uri, **kwargs):
return TextFile(uri)
@drop.register(TextFile)
def drop_textfile(data, **kwargs):
os.remove(data.path)
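# Illustrative sketch only: exercising the hooks registered above. The file
# name is an assumption; ``resource`` matches ``*.txt``/``*.log`` paths
# (optionally gzipped), ``append`` writes one line per item and ``convert``
# reads the lines back out as an iterator.
def _example_textfile_roundtrip(path='example.log'):
    target = resource(path)                      # -> TextFile(path)
    append(target, ['first line', 'second line'])
    lines = list(convert(Iterator, target))      # raw lines, newlines included
    drop(target)                                 # removes the file from disk
    return lines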
| {
"repo_name": "cpcloud/odo",
"path": "odo/backends/text.py",
"copies": "9",
"size": "2024",
"license": "bsd-3-clause",
"hash": 4503148597744755000,
"line_mean": 24.9487179487,
"line_max": 72,
"alpha_frac": 0.6818181818,
"autogenerated": false,
"ratio": 3.5261324041811846,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8707950585981186,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import h5py
import os
import datashape
from datashape import DataShape, Record, to_numpy, discover
from datashape.predicates import isrecord
from datashape.dispatch import dispatch
import numpy as np
from toolz import keyfilter
from ..numpy_dtype import dshape_to_numpy
from ..append import append
from ..convert import convert, ooc_types
from ..create import create
from ..resource import resource
from ..chunks import chunks
h5py_attributes = ['chunks', 'compression', 'compression_opts', 'dtype',
'fillvalue', 'fletcher32', 'maxshape', 'shape']
try:
unicode_dtype = h5py.special_dtype(vlen=unicode)
except NameError:
unicode_dtype = h5py.special_dtype(vlen=str)
@discover.register((h5py.Group, h5py.File))
def discover_h5py_group_file(g):
return DataShape(Record([[k, discover(v)] for k, v in g.items()]))
def record_dshape_replace(dshape, old, new):
"""Recursively replace all instances of `old` with `new` in the record
dshape `dshape`.
Examples
--------
>>> from datashape import Record, string, object_, dshape
>>> ds = DataShape(Record([('a', 'int64'),
... ('b', 10 * Record([('c', 'object')])),
... ('d', 'int64')]))
...
>>> Record(list(record_dshape_replace(ds, object_, string)))
dshape("{a: int64, b: 10 * {c: object}, d: int64}")
"""
assert isrecord(dshape), 'input dshape must be a record'
for name, subshape in dshape.measure.fields:
if subshape == old:
yield name, new
else:
if isrecord(subshape):
yield record_dshape_replace(subshape, old, new)
else:
yield name, subshape
@discover.register(h5py.Dataset)
def discover_h5py_dataset(d):
dshape = datashape.from_numpy(d.shape, d.dtype)
shape, measure = dshape.shape, dshape.measure
if not isrecord(measure):
if dshape == datashape.object_:
args = shape + (datashape.string,)
return DataShape(*args)
return dshape
else:
records = list(record_dshape_replace(measure, datashape.object_,
datashape.string))
args = shape + (datashape.Record(records),)
return DataShape(*args)
def dtype_replace(dtype, old, new):
"""Replace the subdtype `old` in `subdtype` with `new`.
Parameters
----------
dtype, old, new : dtype
Examples
--------
>>> dt = np.dtype([('a', 'int64'), ('b', 'object'),
... ('c', [('d', 'object'), ('e', 'float64')])])
...
>>> r = np.dtype(list(dtype_replace(dt, 'int64', 'float64')))
>>> r
dtype([('a', '<f8'), ('b', 'O'), ('c', [('d', 'O'), ('e', '<f8')])])
"""
names = dtype.names
assert names is not None, 'dtype must be record-like'
for name, subdtype in zip(names, map(dtype.__getitem__, names)):
if subdtype == old:
yield name, new
else:
if subdtype.names is not None:
yield name, list(dtype_replace(subdtype, old, new))
else:
yield name, subdtype
def varlen_dtype(dt):
"""Inject variable length string element for object dtype
Examples
--------
>>> dt = np.dtype('object')
>>> dt
dtype('O')
>>> r = varlen_dtype(dt)
>>> r
dtype('O')
>>> r.metadata['vlen'] # doctest: +SKIP
<type 'unicode'>
>>> dt = np.dtype([('a', 'int64'), ('b', 'object'),
... ('c', [('d', 'object'), ('e', 'float64')])])
...
>>> dt['b'].metadata
>>> r = varlen_dtype(dt)
>>> r
dtype([('a', '<i8'), ('b', 'O'), ('c', [('d', 'O'), ('e', '<f8')])])
>>> r['b'].metadata['vlen'] # doctest: +SKIP
<type 'unicode'>
"""
if dt == np.object_:
return unicode_dtype
elif dt.names is None: # some kind of non record like dtype
return dt
else:
return np.dtype(list(dtype_replace(dt, np.dtype('object'),
unicode_dtype)))
def dataset_from_dshape(file, datapath, ds, **kwargs):
dtype = varlen_dtype(dshape_to_numpy(ds))
if datashape.var not in list(ds):
shape = tuple(map(int, ds.shape))
elif datashape.var not in list(ds)[1:]:
shape = (0,) + tuple(map(int, ds.shape[1:]))
else:
raise ValueError("Don't know how to handle varlen nd shapes")
if shape:
kwargs['chunks'] = kwargs.get('chunks', True)
kwargs['maxshape'] = kwargs.get('maxshape', (None,) + shape[1:])
kwargs2 = keyfilter(h5py_attributes.__contains__, kwargs)
return file.require_dataset(datapath, shape=shape, dtype=dtype, **kwargs2)
def create_from_datashape(group, ds, name=None, **kwargs):
if not isrecord(ds):
raise ValueError(
"Trying to create an HDF5 file with non-record datashape failed\n"
"Perhaps you forgot to specify a datapath?\n"
"\tdshape: %s\n"
"If you're using odo consider the following change\n"
"\tBefore: odo(data, 'myfile.hdf5')\n"
"\tAfter: odo(data, 'myfile.hdf5::/datapath')" % ds)
if isinstance(ds, DataShape) and len(ds) == 1:
ds = ds[0]
for name, sub_ds in ds.dict.items():
if isrecord(sub_ds):
g = group.require_group(name)
create_from_datashape(g, sub_ds, **kwargs)
else:
dataset_from_dshape(file=group.file,
datapath='/'.join([group.name, name]),
ds=sub_ds, **kwargs)
@create.register(h5py.File)
def create_h5py_file(cls, path=None, dshape=None, **kwargs):
f = h5py.File(path)
create_from_datashape(f, dshape, **kwargs)
return f
@append.register(h5py.Dataset, np.ndarray)
def append_h5py(dset, x, **kwargs):
if not sum(x.shape):
return dset
shape = list(dset.shape)
shape[0] += len(x)
dset.resize(shape)
dset[-len(x):] = x
return dset
@append.register(h5py.Dataset, chunks(np.ndarray))
def append_h5py(dset, c, **kwargs):
for chunk in c:
append(dset, chunk)
return dset
@append.register(h5py.Dataset, object)
def append_h5py(dset, x, **kwargs):
return append(dset, convert(chunks(np.ndarray), x, **kwargs), **kwargs)
@convert.register(np.ndarray, h5py.Dataset, cost=3.0)
def h5py_to_numpy(dset, force=False, **kwargs):
if dset.size > 1e9:
raise MemoryError(("File size is large: %0.2f GB.\n"
"Convert with flag force=True to force loading") %
(dset.size / 1e9))
else:
return dset[:]
@convert.register(chunks(np.ndarray), h5py.Dataset, cost=3.0)
def h5py_to_numpy_chunks(dset, chunksize=2 ** 20, **kwargs):
def load():
for i in range(0, dset.shape[0], chunksize):
yield dset[i: i + chunksize]
return chunks(np.ndarray)(load)
@resource.register('h5py://.+', priority=11)
def resource_h5py(uri, datapath=None, dshape=None, expected_dshape=None,
**kwargs):
if uri.startswith('h5py://'):
uri = uri[len('h5py://'):]
f = h5py.File(uri)
olddatapath = datapath
if datapath is not None and datapath in f:
old_dset = f[datapath]
if expected_dshape is not None:
dshape = expected_dshape
assert dshape == discover(old_dset)
if dshape is not None:
ds = datashape.dshape(dshape)
if datapath:
while ds and datapath:
datapath, name = datapath.rsplit('/', 1)
ds = Record([[name, ds]])
ds = datashape.dshape(ds)
f.close()
f = create(h5py.File, path=uri, dshape=ds, **kwargs)
if olddatapath:
return f[olddatapath]
else:
return f
@resource.register(r'^(?!hdfstore).+\.(hdf5|h5)', priority=10)
def resource_hdf5(uri, *args, **kwargs):
return resource_h5py(uri, *args, **kwargs)
@dispatch((h5py.Group, h5py.Dataset))
def drop(h):
del h.file[h.name]
@dispatch(h5py.File)
def drop(h):
fn = h.filename
h.close()
os.remove(fn)
ooc_types.add(h5py.Dataset)
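# Illustrative sketch only: creating a dataset through the ``resource`` hook
# above and appending numpy data to it. The file name, datapath and datashape
# are assumptions for the example.
def _example_h5py_roundtrip(path='example.h5'):
    dset = resource('h5py://' + path, datapath='/data',
                    dshape='var * {x: int64, y: float64}')
    block = np.array([(1, 1.0), (2, 4.0)], dtype=[('x', 'i8'), ('y', 'f8')])
    append(dset, block)              # resizes along the first (unlimited) axis
    out = convert(np.ndarray, dset)  # load the dataset back into memory
    drop(dset.file)                  # close the file and remove it from disk
    return out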
| {
"repo_name": "cpcloud/odo",
"path": "odo/backends/h5py.py",
"copies": "9",
"size": "8231",
"license": "bsd-3-clause",
"hash": -6886017480040308000,
"line_mean": 29.3726937269,
"line_max": 78,
"alpha_frac": 0.5713765035,
"autogenerated": false,
"ratio": 3.374743747437474,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 271
} |
from __future__ import absolute_import, division, print_function
import hashlib
import linecache
import sys
import threading
import warnings
from operator import itemgetter
from . import _config
from ._compat import (
PY2, isclass, iteritems, metadata_proxy, ordered_dict, set_closure_cell
)
from .exceptions import (
DefaultAlreadySetError, FrozenInstanceError, NotAnAttrsClassError,
UnannotatedAttributeError
)
# This is used at least twice, so cache it here.
_obj_setattr = object.__setattr__
_init_converter_pat = "__attr_converter_{}"
_init_factory_pat = "__attr_factory_{}"
_tuple_property_pat = " {attr_name} = property(itemgetter({index}))"
_classvar_prefixes = ("typing.ClassVar", "t.ClassVar", "ClassVar")
_empty_metadata_singleton = metadata_proxy({})
class _Nothing(object):
"""
Sentinel class to indicate the lack of a value when ``None`` is ambiguous.
All instances of `_Nothing` are equal.
"""
def __copy__(self):
return self
def __deepcopy__(self, _):
return self
def __eq__(self, other):
return other.__class__ == _Nothing
def __ne__(self, other):
return not self == other
def __repr__(self):
return "NOTHING"
def __hash__(self):
return 0xc0ffee
NOTHING = _Nothing()
"""
Sentinel to indicate the lack of a value when ``None`` is ambiguous.
"""
def attrib(default=NOTHING, validator=None,
repr=True, cmp=True, hash=None, init=True,
convert=None, metadata=None, type=None, converter=None,
factory=None):
"""
Create a new attribute on a class.
.. warning::
Does *not* do anything unless the class is also decorated with
:func:`attr.s`!
:param default: A value that is used if an ``attrs``-generated ``__init__``
is used and no value is passed while instantiating or the attribute is
excluded using ``init=False``.
If the value is an instance of :class:`Factory`, its callable will be
used to construct a new value (useful for mutable data types like lists
or dicts).
If a default is not set (or set manually to ``attr.NOTHING``), a value
*must* be supplied when instantiating; otherwise a :exc:`TypeError`
will be raised.
The default can also be set using decorator notation as shown below.
:type default: Any value.
:param callable factory: Syntactic sugar for
``default=attr.Factory(callable)``.
:param validator: :func:`callable` that is called by ``attrs``-generated
``__init__`` methods after the instance has been initialized. They
receive the initialized instance, the :class:`Attribute`, and the
passed value.
The return value is *not* inspected so the validator has to throw an
exception itself.
If a ``list`` is passed, its items are treated as validators and must
all pass.
Validators can be globally disabled and re-enabled using
:func:`get_run_validators`.
The validator can also be set using decorator notation as shown below.
:type validator: ``callable`` or a ``list`` of ``callable``\\ s.
:param bool repr: Include this attribute in the generated ``__repr__``
method.
:param bool cmp: Include this attribute in the generated comparison methods
(``__eq__`` et al).
:param hash: Include this attribute in the generated ``__hash__``
method. If ``None`` (default), mirror *cmp*'s value. This is the
        correct behavior according to the Python spec. Setting this value to
        anything other than ``None`` is *discouraged*.
:type hash: ``bool`` or ``None``
:param bool init: Include this attribute in the generated ``__init__``
method. It is possible to set this to ``False`` and set a default
        value. In that case this attribute is unconditionally initialized
with the specified default value or factory.
:param callable converter: :func:`callable` that is called by
        ``attrs``-generated ``__init__`` methods to convert the attribute's value
to the desired format. It is given the passed-in value, and the
returned value will be used as the new value of the attribute. The
value is converted before being passed to the validator, if any.
:param metadata: An arbitrary mapping, to be used by third-party
components. See :ref:`extending_metadata`.
:param type: The type of the attribute. In Python 3.6 or greater, the
preferred method to specify the type is using a variable annotation
(see `PEP 526 <https://www.python.org/dev/peps/pep-0526/>`_).
This argument is provided for backward compatibility.
Regardless of the approach used, the type will be stored on
``Attribute.type``.
.. versionadded:: 15.2.0 *convert*
.. versionadded:: 16.3.0 *metadata*
.. versionchanged:: 17.1.0 *validator* can be a ``list`` now.
.. versionchanged:: 17.1.0
*hash* is ``None`` and therefore mirrors *cmp* by default.
.. versionadded:: 17.3.0 *type*
.. deprecated:: 17.4.0 *convert*
.. versionadded:: 17.4.0 *converter* as a replacement for the deprecated
*convert* to achieve consistency with other noun-based arguments.
.. versionadded:: 18.1.0
``factory=f`` is syntactic sugar for ``default=attr.Factory(f)``.
"""
if hash is not None and hash is not True and hash is not False:
raise TypeError(
"Invalid value for hash. Must be True, False, or None."
)
if convert is not None:
if converter is not None:
raise RuntimeError(
"Can't pass both `convert` and `converter`. "
"Please use `converter` only."
)
warnings.warn(
"The `convert` argument is deprecated in favor of `converter`. "
"It will be removed after 2019/01.",
DeprecationWarning, stacklevel=2
)
converter = convert
if factory is not None:
if default is not NOTHING:
raise ValueError(
"The `default` and `factory` arguments are mutually "
"exclusive."
)
if not callable(factory):
raise ValueError(
"The `factory` argument must be a callable."
)
default = Factory(factory)
if metadata is None:
metadata = {}
return _CountingAttr(
default=default,
validator=validator,
repr=repr,
cmp=cmp,
hash=hash,
init=init,
converter=converter,
metadata=metadata,
type=type,
)
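# Illustrative usage (assumed, mirroring the parameters documented above);
# ``attrib`` only takes effect on classes decorated with ``attr.s``:
#
#   import attr
#
#   @attr.s
#   class Point(object):
#       x = attr.ib(type=int)
#       y = attr.ib(default=0)
#       tags = attr.ib(factory=list)            # same as default=Factory(list)
#       name = attr.ib(converter=str, default="p")
#
#   Point(1)        # -> Point(x=1, y=0, tags=[], name='p')
#   Point(1, 2).y   # -> 2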
def _make_attr_tuple_class(cls_name, attr_names):
"""
Create a tuple subclass to hold `Attribute`s for an `attrs` class.
The subclass is a bare tuple with properties for names.
class MyClassAttributes(tuple):
__slots__ = ()
x = property(itemgetter(0))
"""
attr_class_name = "{}Attributes".format(cls_name)
attr_class_template = [
"class {}(tuple):".format(attr_class_name),
" __slots__ = ()",
]
if attr_names:
for i, attr_name in enumerate(attr_names):
attr_class_template.append(_tuple_property_pat.format(
index=i,
attr_name=attr_name,
))
else:
attr_class_template.append(" pass")
globs = {"itemgetter": itemgetter}
eval(compile("\n".join(attr_class_template), "", "exec"), globs)
return globs[attr_class_name]
# Tuple class for extracted attributes from a class definition.
# `super_attrs` is a subset of `attrs`.
_Attributes = _make_attr_tuple_class("_Attributes", [
"attrs", # all attributes to build dunder methods for
"super_attrs", # attributes that have been inherited
"super_attrs_map", # map inherited attributes to their originating classes
])
def _is_class_var(annot):
"""
Check whether *annot* is a typing.ClassVar.
The string comparison hack is used to avoid evaluating all string
annotations which would put attrs-based classes at a performance
disadvantage compared to plain old classes.
"""
return str(annot).startswith(_classvar_prefixes)
def _get_annotations(cls):
"""
Get annotations for *cls*.
"""
anns = getattr(cls, "__annotations__", None)
if anns is None:
return {}
# Verify that the annotations aren't merely inherited.
for super_cls in cls.__mro__[1:]:
if anns is getattr(super_cls, "__annotations__", None):
return {}
return anns
def _counter_getter(e):
"""
Key function for sorting to avoid re-creating a lambda for every class.
"""
return e[1].counter
def _transform_attrs(cls, these, auto_attribs):
"""
Transform all `_CountingAttr`s on a class into `Attribute`s.
If *these* is passed, use that and don't look for them on the class.
Return an `_Attributes`.
"""
cd = cls.__dict__
anns = _get_annotations(cls)
if these is not None:
ca_list = [
(name, ca)
for name, ca
in iteritems(these)
]
if not isinstance(these, ordered_dict):
ca_list.sort(key=_counter_getter)
elif auto_attribs is True:
ca_names = {
name
for name, attr
in cd.items()
if isinstance(attr, _CountingAttr)
}
ca_list = []
annot_names = set()
for attr_name, type in anns.items():
if _is_class_var(type):
continue
annot_names.add(attr_name)
a = cd.get(attr_name, NOTHING)
if not isinstance(a, _CountingAttr):
if a is NOTHING:
a = attrib()
else:
a = attrib(default=a)
ca_list.append((attr_name, a))
unannotated = ca_names - annot_names
if len(unannotated) > 0:
raise UnannotatedAttributeError(
"The following `attr.ib`s lack a type annotation: " +
", ".join(sorted(
unannotated,
key=lambda n: cd.get(n).counter
)) + "."
)
else:
ca_list = sorted((
(name, attr)
for name, attr
in cd.items()
if isinstance(attr, _CountingAttr)
), key=lambda e: e[1].counter)
own_attrs = [
Attribute.from_counting_attr(
name=attr_name,
ca=ca,
type=anns.get(attr_name),
)
for attr_name, ca
in ca_list
]
super_attrs = []
super_attr_map = {} # A dictionary of superattrs to their classes.
taken_attr_names = {a.name: a for a in own_attrs}
# Traverse the MRO and collect attributes.
for super_cls in cls.__mro__[1:-1]:
sub_attrs = getattr(super_cls, "__attrs_attrs__", None)
if sub_attrs is not None:
for a in sub_attrs:
prev_a = taken_attr_names.get(a.name)
# Only add an attribute if it hasn't been defined before. This
# allows for overwriting attribute definitions by subclassing.
if prev_a is None:
super_attrs.append(a)
taken_attr_names[a.name] = a
super_attr_map[a.name] = super_cls
attr_names = [a.name for a in super_attrs + own_attrs]
AttrsClass = _make_attr_tuple_class(cls.__name__, attr_names)
attrs = AttrsClass(
super_attrs + [
Attribute.from_counting_attr(
name=attr_name,
ca=ca,
type=anns.get(attr_name)
)
for attr_name, ca
in ca_list
]
)
had_default = False
for a in attrs:
if had_default is True and a.default is NOTHING and a.init is True:
raise ValueError(
"No mandatory attributes allowed after an attribute with a "
"default value or factory. Attribute in question: {a!r}"
.format(a=a)
)
elif had_default is False and \
a.default is not NOTHING and \
a.init is not False:
had_default = True
return _Attributes((attrs, super_attrs, super_attr_map))
def _frozen_setattrs(self, name, value):
"""
Attached to frozen classes as __setattr__.
"""
raise FrozenInstanceError()
def _frozen_delattrs(self, name):
"""
Attached to frozen classes as __delattr__.
"""
raise FrozenInstanceError()
class _ClassBuilder(object):
"""
Iteratively build *one* class.
"""
__slots__ = (
"_cls", "_cls_dict", "_attrs", "_super_names", "_attr_names", "_slots",
"_frozen", "_has_post_init", "_delete_attribs", "_super_attr_map",
)
def __init__(self, cls, these, slots, frozen, auto_attribs):
attrs, super_attrs, super_map = _transform_attrs(
cls, these, auto_attribs
)
self._cls = cls
self._cls_dict = dict(cls.__dict__) if slots else {}
self._attrs = attrs
self._super_names = set(a.name for a in super_attrs)
self._super_attr_map = super_map
self._attr_names = tuple(a.name for a in attrs)
self._slots = slots
self._frozen = frozen or _has_frozen_superclass(cls)
self._has_post_init = bool(getattr(cls, "__attrs_post_init__", False))
self._delete_attribs = not bool(these)
self._cls_dict["__attrs_attrs__"] = self._attrs
if frozen:
self._cls_dict["__setattr__"] = _frozen_setattrs
self._cls_dict["__delattr__"] = _frozen_delattrs
def __repr__(self):
return "<_ClassBuilder(cls={cls})>".format(cls=self._cls.__name__)
def build_class(self):
"""
Finalize class based on the accumulated configuration.
Builder cannot be used anymore after calling this method.
"""
if self._slots is True:
return self._create_slots_class()
else:
return self._patch_original_class()
def _patch_original_class(self):
"""
Apply accumulated methods and return the class.
"""
cls = self._cls
super_names = self._super_names
# Clean class of attribute definitions (`attr.ib()`s).
if self._delete_attribs:
for name in self._attr_names:
if name not in super_names and \
getattr(cls, name, None) is not None:
delattr(cls, name)
# Attach our dunder methods.
for name, value in self._cls_dict.items():
setattr(cls, name, value)
return cls
def _create_slots_class(self):
"""
Build and return a new class with a `__slots__` attribute.
"""
super_names = self._super_names
cd = {
k: v
for k, v in iteritems(self._cls_dict)
if k not in tuple(self._attr_names) + ("__dict__",)
}
# We only add the names of attributes that aren't inherited.
# Settings __slots__ to inherited attributes wastes memory.
cd["__slots__"] = tuple(
name
for name in self._attr_names
if name not in super_names
)
qualname = getattr(self._cls, "__qualname__", None)
if qualname is not None:
cd["__qualname__"] = qualname
# __weakref__ is not writable.
state_attr_names = tuple(
an for an in self._attr_names if an != "__weakref__"
)
def slots_getstate(self):
"""
Automatically created by attrs.
"""
return tuple(
getattr(self, name)
for name in state_attr_names
)
def slots_setstate(self, state):
"""
Automatically created by attrs.
"""
__bound_setattr = _obj_setattr.__get__(self, Attribute)
for name, value in zip(state_attr_names, state):
__bound_setattr(name, value)
# slots and frozen require __getstate__/__setstate__ to work
cd["__getstate__"] = slots_getstate
cd["__setstate__"] = slots_setstate
# Create new class based on old class and our methods.
cls = type(self._cls)(
self._cls.__name__,
self._cls.__bases__,
cd,
)
# The following is a fix for
# https://github.com/python-attrs/attrs/issues/102. On Python 3,
# if a method mentions `__class__` or uses the no-arg super(), the
# compiler will bake a reference to the class in the method itself
# as `method.__closure__`. Since we replace the class with a
# clone, we rewrite these references so it keeps working.
for item in cls.__dict__.values():
if isinstance(item, (classmethod, staticmethod)):
# Class- and staticmethods hide their functions inside.
# These might need to be rewritten as well.
closure_cells = getattr(item.__func__, "__closure__", None)
else:
closure_cells = getattr(item, "__closure__", None)
if not closure_cells: # Catch None or the empty list.
continue
for cell in closure_cells:
if cell.cell_contents is self._cls:
set_closure_cell(cell, cls)
return cls
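    # Illustrative motivation for the closure-cell rewrite above (assumed
    # example, not part of this module): with ``slots=True`` the decorated
    # class is replaced by this freshly created clone, so a no-arg ``super()``
    # call in a user method would otherwise keep pointing at the original
    # class through its ``__class__`` closure cell:
    #
    #   @attr.s(slots=True)
    #   class C(Base):
    #       def go(self):
    #           return super().go()   # relies on the __class__ closure cell
    #
    # Rewriting each matching cell (including those hidden inside classmethod
    # and staticmethod wrappers) keeps such calls working after the swap.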
def add_repr(self, ns):
self._cls_dict["__repr__"] = self._add_method_dunders(
_make_repr(self._attrs, ns=ns)
)
return self
def add_str(self):
repr = self._cls_dict.get("__repr__")
if repr is None:
raise ValueError(
"__str__ can only be generated if a __repr__ exists."
)
def __str__(self):
return self.__repr__()
self._cls_dict["__str__"] = self._add_method_dunders(__str__)
return self
def make_unhashable(self):
self._cls_dict["__hash__"] = None
return self
def add_hash(self):
self._cls_dict["__hash__"] = self._add_method_dunders(
_make_hash(self._attrs)
)
return self
def add_init(self):
self._cls_dict["__init__"] = self._add_method_dunders(
_make_init(
self._attrs,
self._has_post_init,
self._frozen,
self._slots,
self._super_attr_map,
)
)
return self
def add_cmp(self):
cd = self._cls_dict
cd["__eq__"], cd["__ne__"], cd["__lt__"], cd["__le__"], cd["__gt__"], \
cd["__ge__"] = (
self._add_method_dunders(meth)
for meth in _make_cmp(self._attrs)
)
return self
def _add_method_dunders(self, method):
"""
Add __module__ and __qualname__ to a *method* if possible.
"""
try:
method.__module__ = self._cls.__module__
except AttributeError:
pass
try:
method.__qualname__ = ".".join(
(self._cls.__qualname__, method.__name__,)
)
except AttributeError:
pass
return method
def attrs(maybe_cls=None, these=None, repr_ns=None,
repr=True, cmp=True, hash=None, init=True,
slots=False, frozen=False, str=False, auto_attribs=False):
r"""
A class decorator that adds `dunder
<https://wiki.python.org/moin/DunderAlias>`_\ -methods according to the
specified attributes using :func:`attr.ib` or the *these* argument.
:param these: A dictionary of name to :func:`attr.ib` mappings. This is
useful to avoid the definition of your attributes within the class body
because you can't (e.g. if you want to add ``__repr__`` methods to
Django models) or don't want to.
If *these* is not ``None``, ``attrs`` will *not* search the class body
for attributes and will *not* remove any attributes from it.
If *these* is an ordered dict (:class:`dict` on Python 3.6+,
:class:`collections.OrderedDict` otherwise), the order is deduced from
the order of the attributes inside *these*. Otherwise the order
of the definition of the attributes is used.
:type these: :class:`dict` of :class:`str` to :func:`attr.ib`
:param str repr_ns: When using nested classes, there's no way in Python 2
to automatically detect that. Therefore it's possible to set the
namespace explicitly for a more meaningful ``repr`` output.
:param bool repr: Create a ``__repr__`` method with a human readable
        representation of ``attrs`` attributes.
:param bool str: Create a ``__str__`` method that is identical to
``__repr__``. This is usually not necessary except for
:class:`Exception`\ s.
:param bool cmp: Create ``__eq__``, ``__ne__``, ``__lt__``, ``__le__``,
``__gt__``, and ``__ge__`` methods that compare the class as if it were
a tuple of its ``attrs`` attributes. But the attributes are *only*
        compared if the type of both classes is *identical*!
:param hash: If ``None`` (default), the ``__hash__`` method is generated
        according to how *cmp* and *frozen* are set.
1. If *both* are True, ``attrs`` will generate a ``__hash__`` for you.
2. If *cmp* is True and *frozen* is False, ``__hash__`` will be set to
None, marking it unhashable (which it is).
3. If *cmp* is False, ``__hash__`` will be left untouched meaning the
``__hash__`` method of the superclass will be used (if superclass is
           ``object``, this means it will fall back to id-based hashing).
        Although not recommended, you can decide for yourself and force
        ``attrs`` to create one (e.g. if the class is immutable even though you
        didn't freeze it programmatically) by passing ``True``, or keep
        ``attrs`` from touching ``__hash__`` at all by passing ``False``. Both
        of these cases are rather special and should be used carefully.
See the `Python documentation \
<https://docs.python.org/3/reference/datamodel.html#object.__hash__>`_
and the `GitHub issue that led to the default behavior \
<https://github.com/python-attrs/attrs/issues/136>`_ for more details.
:type hash: ``bool`` or ``None``
:param bool init: Create a ``__init__`` method that initializes the
``attrs`` attributes. Leading underscores are stripped for the
argument name. If a ``__attrs_post_init__`` method exists on the
class, it will be called after the class is fully initialized.
:param bool slots: Create a slots_-style class that's more
memory-efficient. See :ref:`slots` for further ramifications.
:param bool frozen: Make instances immutable after initialization. If
someone attempts to modify a frozen instance,
:exc:`attr.exceptions.FrozenInstanceError` is raised.
Please note:
1. This is achieved by installing a custom ``__setattr__`` method
           on your class, so you can't implement your own.
2. True immutability is impossible in Python.
        3. This *does* have a minor runtime performance :ref:`impact
<how-frozen>` when initializing new instances. In other words:
``__init__`` is slightly slower with ``frozen=True``.
4. If a class is frozen, you cannot modify ``self`` in
``__attrs_post_init__`` or a self-written ``__init__``. You can
circumvent that limitation by using
``object.__setattr__(self, "attribute_name", value)``.
.. _slots: https://docs.python.org/3/reference/datamodel.html#slots
:param bool auto_attribs: If True, collect `PEP 526`_-annotated attributes
(Python 3.6 and later only) from the class body.
In this case, you **must** annotate every field. If ``attrs``
encounters a field that is set to an :func:`attr.ib` but lacks a type
annotation, an :exc:`attr.exceptions.UnannotatedAttributeError` is
raised. Use ``field_name: typing.Any = attr.ib(...)`` if you don't
want to set a type.
If you assign a value to those attributes (e.g. ``x: int = 42``), that
        value becomes the default value as if it were passed using
``attr.ib(default=42)``. Passing an instance of :class:`Factory` also
works as expected.
Attributes annotated as :data:`typing.ClassVar` are **ignored**.
.. _`PEP 526`: https://www.python.org/dev/peps/pep-0526/
.. versionadded:: 16.0.0 *slots*
.. versionadded:: 16.1.0 *frozen*
.. versionadded:: 16.3.0 *str*
.. versionadded:: 16.3.0 Support for ``__attrs_post_init__``.
.. versionchanged:: 17.1.0
*hash* supports ``None`` as value which is also the default now.
.. versionadded:: 17.3.0 *auto_attribs*
.. versionchanged:: 18.1.0
If *these* is passed, no attributes are deleted from the class body.
.. versionchanged:: 18.1.0 If *these* is ordered, the order is retained.
"""
def wrap(cls):
if getattr(cls, "__class__", None) is None:
raise TypeError("attrs only works with new-style classes.")
builder = _ClassBuilder(cls, these, slots, frozen, auto_attribs)
if repr is True:
builder.add_repr(repr_ns)
if str is True:
builder.add_str()
if cmp is True:
builder.add_cmp()
if hash is not True and hash is not False and hash is not None:
# Can't use `hash in` because 1 == True for example.
raise TypeError(
"Invalid value for hash. Must be True, False, or None."
)
elif hash is False or (hash is None and cmp is False):
pass
elif hash is True or (hash is None and cmp is True and frozen is True):
builder.add_hash()
else:
builder.make_unhashable()
if init is True:
builder.add_init()
return builder.build_class()
# maybe_cls's type depends on the usage of the decorator. It's a class
# if it's used as `@attrs` but ``None`` if used as `@attrs()`.
if maybe_cls is None:
return wrap
else:
return wrap(maybe_cls)
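# --- Editorial usage sketch (added for illustration; not part of the original attrs source). ---
# How the *hash* handling in wrap() above plays out: with frozen=True and cmp=True
# (hash left at its None default), a __hash__ is generated, so instances can be used
# as dict keys. The class name is hypothetical and this helper is never called here.
def _example_attrs_hash_behavior():
    @attrs(frozen=True)
    class Coordinates(object):
        x = attrib()
        y = attrib()
    assert Coordinates(1, 2) == Coordinates(1, 2)
    return {Coordinates(1, 2): "home"}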
_attrs = attrs
"""
Internal alias so we can use it in functions that take an argument called
*attrs*.
"""
if PY2:
def _has_frozen_superclass(cls):
"""
Check whether *cls* has a frozen ancestor by looking at its
__setattr__.
"""
return (
getattr(
cls.__setattr__, "__module__", None
) == _frozen_setattrs.__module__ and
cls.__setattr__.__name__ == _frozen_setattrs.__name__
)
else:
def _has_frozen_superclass(cls):
"""
Check whether *cls* has a frozen ancestor by looking at its
__setattr__.
"""
return cls.__setattr__ == _frozen_setattrs
def _attrs_to_tuple(obj, attrs):
"""
Create a tuple of all values of *obj*'s *attrs*.
"""
return tuple(getattr(obj, a.name) for a in attrs)
def _make_hash(attrs):
attrs = tuple(
a
for a in attrs
if a.hash is True or (a.hash is None and a.cmp is True)
)
# We cache the generated hash methods for the same kinds of attributes.
sha1 = hashlib.sha1()
sha1.update(repr(attrs).encode("utf-8"))
unique_filename = "<attrs generated hash %s>" % (sha1.hexdigest(),)
type_hash = hash(unique_filename)
lines = [
"def __hash__(self):",
" return hash((",
" %d," % (type_hash,),
]
for a in attrs:
lines.append(" self.%s," % (a.name))
lines.append(" ))")
script = "\n".join(lines)
globs = {}
locs = {}
bytecode = compile(script, unique_filename, "exec")
eval(bytecode, globs, locs)
    # In order for debuggers like PDB to be able to step through the code,
# we add a fake linecache entry.
linecache.cache[unique_filename] = (
len(script),
None,
script.splitlines(True),
unique_filename,
)
return locs["__hash__"]
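# Editorial sketch (illustration only): for a class with hashable attributes ``x``
# and ``y``, the script assembled in _make_hash() above is roughly equivalent to
# the hand-written method below; the integer literal stands in for the per-class
# ``type_hash`` salt derived from *unique_filename*.
def _example_generated_hash_shape(self):
    return hash((
        -123456789,
        self.x,
        self.y,
    ))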
def _add_hash(cls, attrs):
"""
Add a hash method to *cls*.
"""
cls.__hash__ = _make_hash(attrs)
return cls
def __ne__(self, other):
"""
Check equality and either forward a NotImplemented or return the result
negated.
"""
result = self.__eq__(other)
if result is NotImplemented:
return NotImplemented
return not result
def _make_cmp(attrs):
attrs = [a for a in attrs if a.cmp]
# We cache the generated eq methods for the same kinds of attributes.
sha1 = hashlib.sha1()
sha1.update(repr(attrs).encode("utf-8"))
unique_filename = "<attrs generated eq %s>" % (sha1.hexdigest(),)
lines = [
"def __eq__(self, other):",
" if other.__class__ is not self.__class__:",
" return NotImplemented",
]
# We can't just do a big self.x = other.x and... clause due to
# irregularities like nan == nan is false but (nan,) == (nan,) is true.
if attrs:
lines.append(" return (")
others = [
" ) == (",
]
for a in attrs:
lines.append(" self.%s," % (a.name,))
others.append(" other.%s," % (a.name,))
lines += others + [" )"]
else:
lines.append(" return True")
script = "\n".join(lines)
globs = {}
locs = {}
bytecode = compile(script, unique_filename, "exec")
eval(bytecode, globs, locs)
    # In order for debuggers like PDB to be able to step through the code,
# we add a fake linecache entry.
linecache.cache[unique_filename] = (
len(script),
None,
script.splitlines(True),
unique_filename,
)
eq = locs["__eq__"]
ne = __ne__
def attrs_to_tuple(obj):
"""
Save us some typing.
"""
return _attrs_to_tuple(obj, attrs)
def __lt__(self, other):
"""
Automatically created by attrs.
"""
if isinstance(other, self.__class__):
return attrs_to_tuple(self) < attrs_to_tuple(other)
else:
return NotImplemented
def __le__(self, other):
"""
Automatically created by attrs.
"""
if isinstance(other, self.__class__):
return attrs_to_tuple(self) <= attrs_to_tuple(other)
else:
return NotImplemented
def __gt__(self, other):
"""
Automatically created by attrs.
"""
if isinstance(other, self.__class__):
return attrs_to_tuple(self) > attrs_to_tuple(other)
else:
return NotImplemented
def __ge__(self, other):
"""
Automatically created by attrs.
"""
if isinstance(other, self.__class__):
return attrs_to_tuple(self) >= attrs_to_tuple(other)
else:
return NotImplemented
return eq, ne, __lt__, __le__, __gt__, __ge__
def _add_cmp(cls, attrs=None):
"""
Add comparison methods to *cls*.
"""
if attrs is None:
attrs = cls.__attrs_attrs__
cls.__eq__, cls.__ne__, cls.__lt__, cls.__le__, cls.__gt__, cls.__ge__ = \
_make_cmp(attrs)
return cls
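# Editorial usage sketch (not part of the original source): the comparison methods
# produced by _make_cmp() compare instances as tuples of their attrs attributes and
# return NotImplemented for foreign types. ``C`` is a hypothetical example class.
def _example_cmp_semantics():
    @attrs
    class C(object):
        x = attrib()
    assert C(1) == C(1)                   # tuple-wise equality, exact same class required
    assert C(1) < C(2)                    # ordering also compares the attribute tuples
    assert (C(1) == object()) is False    # __eq__ -> NotImplemented -> falls back to identity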
_already_repring = threading.local()
def _make_repr(attrs, ns):
"""
    Make a repr method for *attrs*, adding *ns* to the full name.
"""
attr_names = tuple(
a.name
for a in attrs
if a.repr
)
def __repr__(self):
"""
Automatically created by attrs.
"""
try:
working_set = _already_repring.working_set
except AttributeError:
working_set = set()
_already_repring.working_set = working_set
if id(self) in working_set:
return "..."
real_cls = self.__class__
if ns is None:
qualname = getattr(real_cls, "__qualname__", None)
if qualname is not None:
class_name = qualname.rsplit(">.", 1)[-1]
else:
class_name = real_cls.__name__
else:
class_name = ns + "." + real_cls.__name__
# Since 'self' remains on the stack (i.e.: strongly referenced) for the
# duration of this call, it's safe to depend on id(...) stability, and
# not need to track the instance and therefore worry about properties
# like weakref- or hash-ability.
working_set.add(id(self))
try:
result = [class_name, "("]
first = True
for name in attr_names:
if first:
first = False
else:
result.append(", ")
result.extend((name, "=", repr(getattr(self, name, NOTHING))))
return "".join(result) + ")"
finally:
working_set.remove(id(self))
return __repr__
def _add_repr(cls, ns=None, attrs=None):
"""
Add a repr method to *cls*.
"""
if attrs is None:
attrs = cls.__attrs_attrs__
cls.__repr__ = _make_repr(attrs, ns)
return cls
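# Editorial usage sketch (illustration only): the _already_repring working set used
# in _make_repr() above breaks repr() cycles by printing "..." for an instance that
# is already being rendered. ``Node`` is a hypothetical example class.
def _example_repr_recursion_guard():
    @attrs
    class Node(object):
        value = attrib()
        parent = attrib(default=None)
    n = Node(1)
    n.parent = n                                     # create a reference cycle
    assert repr(n) == "Node(value=1, parent=...)"    # the cycle collapses to "..."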
def _make_init(attrs, post_init, frozen, slots, super_attr_map):
attrs = [
a
for a in attrs
if a.init or a.default is not NOTHING
]
# We cache the generated init methods for the same kinds of attributes.
sha1 = hashlib.sha1()
sha1.update(repr(attrs).encode("utf-8"))
unique_filename = "<attrs generated init {0}>".format(
sha1.hexdigest()
)
script, globs, annotations = _attrs_to_init_script(
attrs,
frozen,
slots,
post_init,
super_attr_map,
)
locs = {}
bytecode = compile(script, unique_filename, "exec")
attr_dict = dict((a.name, a) for a in attrs)
globs.update({
"NOTHING": NOTHING,
"attr_dict": attr_dict,
})
if frozen is True:
# Save the lookup overhead in __init__ if we need to circumvent
# immutability.
globs["_cached_setattr"] = _obj_setattr
eval(bytecode, globs, locs)
    # In order for debuggers like PDB to be able to step through the code,
# we add a fake linecache entry.
linecache.cache[unique_filename] = (
len(script),
None,
script.splitlines(True),
unique_filename,
)
__init__ = locs["__init__"]
__init__.__annotations__ = annotations
return __init__
def _add_init(cls, frozen):
"""
Add a __init__ method to *cls*. If *frozen* is True, make it immutable.
"""
cls.__init__ = _make_init(
cls.__attrs_attrs__,
getattr(cls, "__attrs_post_init__", False),
frozen,
_is_slot_cls(cls),
{},
)
return cls
def fields(cls):
"""
Return the tuple of ``attrs`` attributes for a class.
The tuple also allows accessing the fields by their names (see below for
examples).
:param type cls: Class to introspect.
:raise TypeError: If *cls* is not a class.
:raise attr.exceptions.NotAnAttrsClassError: If *cls* is not an ``attrs``
class.
:rtype: tuple (with name accessors) of :class:`attr.Attribute`
.. versionchanged:: 16.2.0 Returned tuple allows accessing the fields
by name.
"""
if not isclass(cls):
raise TypeError("Passed object must be a class.")
attrs = getattr(cls, "__attrs_attrs__", None)
if attrs is None:
raise NotAnAttrsClassError(
"{cls!r} is not an attrs-decorated class.".format(cls=cls)
)
return attrs
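# Editorial usage sketch (not part of the original source): fields() returns the
# class's __attrs_attrs__ tuple, whose generated subclass also exposes each
# Attribute by name. ``C`` is a hypothetical example class.
def _example_fields_usage():
    @attrs
    class C(object):
        x = attrib()
        y = attrib(default=3)
    fs = fields(C)
    assert fs[0].name == "x"        # plain tuple access
    assert fs.y.default == 3        # name accessor added by _make_attr_tuple_class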
def fields_dict(cls):
"""
Return an ordered dictionary of ``attrs`` attributes for a class, whose
keys are the attribute names.
:param type cls: Class to introspect.
:raise TypeError: If *cls* is not a class.
:raise attr.exceptions.NotAnAttrsClassError: If *cls* is not an ``attrs``
class.
:rtype: an ordered dict where keys are attribute names and values are
:class:`attr.Attribute`\\ s. This will be a :class:`dict` if it's
naturally ordered like on Python 3.6+ or an
:class:`~collections.OrderedDict` otherwise.
.. versionadded:: 18.1.0
"""
if not isclass(cls):
raise TypeError("Passed object must be a class.")
attrs = getattr(cls, "__attrs_attrs__", None)
if attrs is None:
raise NotAnAttrsClassError(
"{cls!r} is not an attrs-decorated class.".format(cls=cls)
)
return ordered_dict(((a.name, a) for a in attrs))
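# Editorial usage sketch (illustration only): fields_dict() exposes the same
# Attribute objects as fields(), keyed by attribute name and kept in definition order.
def _example_fields_dict_usage():
    @attrs
    class C(object):
        x = attrib()
        y = attrib(default=0)
    d = fields_dict(C)
    assert list(d) == ["x", "y"]
    assert d["x"] is fields(C).x    # same Attribute instances, just keyed by name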
def validate(inst):
"""
Validate all attributes on *inst* that have a validator.
    Lets all exceptions propagate.
:param inst: Instance of a class with ``attrs`` attributes.
"""
if _config._run_validators is False:
return
for a in fields(inst.__class__):
v = a.validator
if v is not None:
v(inst, a, getattr(inst, a.name))
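# Editorial usage sketch (not part of the original source): validators run inside the
# generated __init__, but direct assignment bypasses them; validate() re-runs them on
# demand. The validator and class below are hypothetical.
def _example_validate_usage():
    def _must_be_positive(inst, attribute, value):
        if value <= 0:
            raise ValueError("%s must be positive" % (attribute.name,))
    @attrs
    class C(object):
        x = attrib(validator=_must_be_positive)
    c = C(1)          # validator passes during __init__
    c.x = -1          # plain assignment is not validated...
    try:
        validate(c)   # ...but an explicit validate() call catches it.
    except ValueError:
        pass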
def _is_slot_cls(cls):
return "__slots__" in cls.__dict__
def _is_slot_attr(a_name, super_attr_map):
"""
Check if the attribute name comes from a slot class.
"""
return a_name in super_attr_map and _is_slot_cls(super_attr_map[a_name])
def _attrs_to_init_script(attrs, frozen, slots, post_init, super_attr_map):
"""
Return a script of an initializer for *attrs* and a dict of globals.
The globals are expected by the generated script.
If *frozen* is True, we cannot set the attributes directly so we use
a cached ``object.__setattr__``.
"""
lines = []
any_slot_ancestors = any(
_is_slot_attr(a.name, super_attr_map)
for a in attrs
)
if frozen is True:
if slots is True:
lines.append(
# Circumvent the __setattr__ descriptor to save one lookup per
# assignment.
"_setattr = _cached_setattr.__get__(self, self.__class__)"
)
def fmt_setter(attr_name, value_var):
return "_setattr('%(attr_name)s', %(value_var)s)" % {
"attr_name": attr_name,
"value_var": value_var,
}
def fmt_setter_with_converter(attr_name, value_var):
conv_name = _init_converter_pat.format(attr_name)
return "_setattr('%(attr_name)s', %(conv)s(%(value_var)s))" % {
"attr_name": attr_name,
"value_var": value_var,
"conv": conv_name,
}
else:
# Dict frozen classes assign directly to __dict__.
# But only if the attribute doesn't come from an ancestor slot
# class.
lines.append(
"_inst_dict = self.__dict__"
)
if any_slot_ancestors:
lines.append(
# Circumvent the __setattr__ descriptor to save one lookup
# per assignment.
"_setattr = _cached_setattr.__get__(self, self.__class__)"
)
def fmt_setter(attr_name, value_var):
if _is_slot_attr(attr_name, super_attr_map):
res = "_setattr('%(attr_name)s', %(value_var)s)" % {
"attr_name": attr_name,
"value_var": value_var,
}
else:
res = "_inst_dict['%(attr_name)s'] = %(value_var)s" % {
"attr_name": attr_name,
"value_var": value_var,
}
return res
def fmt_setter_with_converter(attr_name, value_var):
conv_name = _init_converter_pat.format(attr_name)
if _is_slot_attr(attr_name, super_attr_map):
tmpl = "_setattr('%(attr_name)s', %(c)s(%(value_var)s))"
else:
tmpl = "_inst_dict['%(attr_name)s'] = %(c)s(%(value_var)s)"
return tmpl % {
"attr_name": attr_name,
"value_var": value_var,
"c": conv_name,
}
else:
# Not frozen.
def fmt_setter(attr_name, value):
return "self.%(attr_name)s = %(value)s" % {
"attr_name": attr_name,
"value": value,
}
def fmt_setter_with_converter(attr_name, value_var):
conv_name = _init_converter_pat.format(attr_name)
return "self.%(attr_name)s = %(conv)s(%(value_var)s)" % {
"attr_name": attr_name,
"value_var": value_var,
"conv": conv_name,
}
args = []
attrs_to_validate = []
# This is a dictionary of names to validator and converter callables.
# Injecting this into __init__ globals lets us avoid lookups.
names_for_globals = {}
annotations = {'return': None}
for a in attrs:
if a.validator:
attrs_to_validate.append(a)
attr_name = a.name
arg_name = a.name.lstrip("_")
has_factory = isinstance(a.default, Factory)
if has_factory and a.default.takes_self:
maybe_self = "self"
else:
maybe_self = ""
if a.init is False:
if has_factory:
init_factory_name = _init_factory_pat.format(a.name)
if a.converter is not None:
lines.append(fmt_setter_with_converter(
attr_name,
init_factory_name + "({0})".format(maybe_self)))
conv_name = _init_converter_pat.format(a.name)
names_for_globals[conv_name] = a.converter
else:
lines.append(fmt_setter(
attr_name,
init_factory_name + "({0})".format(maybe_self)
))
names_for_globals[init_factory_name] = a.default.factory
else:
if a.converter is not None:
lines.append(fmt_setter_with_converter(
attr_name,
"attr_dict['{attr_name}'].default"
.format(attr_name=attr_name)
))
conv_name = _init_converter_pat.format(a.name)
names_for_globals[conv_name] = a.converter
else:
lines.append(fmt_setter(
attr_name,
"attr_dict['{attr_name}'].default"
.format(attr_name=attr_name)
))
elif a.default is not NOTHING and not has_factory:
args.append(
"{arg_name}=attr_dict['{attr_name}'].default".format(
arg_name=arg_name,
attr_name=attr_name,
)
)
if a.converter is not None:
lines.append(fmt_setter_with_converter(attr_name, arg_name))
names_for_globals[_init_converter_pat.format(a.name)] = (
a.converter
)
else:
lines.append(fmt_setter(attr_name, arg_name))
elif has_factory:
args.append("{arg_name}=NOTHING".format(arg_name=arg_name))
lines.append("if {arg_name} is not NOTHING:"
.format(arg_name=arg_name))
init_factory_name = _init_factory_pat.format(a.name)
if a.converter is not None:
lines.append(" " + fmt_setter_with_converter(
attr_name, arg_name
))
lines.append("else:")
lines.append(" " + fmt_setter_with_converter(
attr_name,
init_factory_name + "({0})".format(maybe_self)
))
names_for_globals[_init_converter_pat.format(a.name)] = (
a.converter
)
else:
lines.append(" " + fmt_setter(attr_name, arg_name))
lines.append("else:")
lines.append(" " + fmt_setter(
attr_name,
init_factory_name + "({0})".format(maybe_self)
))
names_for_globals[init_factory_name] = a.default.factory
else:
args.append(arg_name)
if a.converter is not None:
lines.append(fmt_setter_with_converter(attr_name, arg_name))
names_for_globals[_init_converter_pat.format(a.name)] = (
a.converter
)
else:
lines.append(fmt_setter(attr_name, arg_name))
if a.init is True and a.converter is None and a.type is not None:
annotations[arg_name] = a.type
if attrs_to_validate: # we can skip this if there are no validators.
names_for_globals["_config"] = _config
lines.append("if _config._run_validators is True:")
for a in attrs_to_validate:
val_name = "__attr_validator_{}".format(a.name)
attr_name = "__attr_{}".format(a.name)
lines.append(" {}(self, {}, self.{})".format(
val_name, attr_name, a.name))
names_for_globals[val_name] = a.validator
names_for_globals[attr_name] = a
if post_init:
lines.append("self.__attrs_post_init__()")
return """\
def __init__(self, {args}):
{lines}
""".format(
args=", ".join(args),
lines="\n ".join(lines) if lines else "pass",
), names_for_globals, annotations
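# Editorial sketch (illustration only): for a single attribute declared as
# ``x = attrib(default=0, converter=int)`` on a plain (non-frozen, non-slots) class,
# the script returned by _attrs_to_init_script() above is roughly equivalent to the
# hand-written initializer below; __attr_converter_x is injected via names_for_globals.
def _example_generated_init_shape():
    def __init__(self, x=0):     # the real script uses attr_dict['x'].default, not a literal
        self.x = int(x)          # stands in for __attr_converter_x(x)
    return __init__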
class Attribute(object):
"""
*Read-only* representation of an attribute.
:attribute name: The name of the attribute.
Plus *all* arguments of :func:`attr.ib`.
For the version history of the fields, see :func:`attr.ib`.
"""
__slots__ = (
"name", "default", "validator", "repr", "cmp", "hash", "init",
"metadata", "type", "converter",
)
def __init__(self, name, default, validator, repr, cmp, hash, init,
convert=None, metadata=None, type=None, converter=None):
# Cache this descriptor here to speed things up later.
bound_setattr = _obj_setattr.__get__(self, Attribute)
# Despite the big red warning, people *do* instantiate `Attribute`
# themselves.
if convert is not None:
if converter is not None:
raise RuntimeError(
"Can't pass both `convert` and `converter`. "
"Please use `converter` only."
)
warnings.warn(
"The `convert` argument is deprecated in favor of `converter`."
" It will be removed after 2019/01.",
DeprecationWarning, stacklevel=2
)
converter = convert
bound_setattr("name", name)
bound_setattr("default", default)
bound_setattr("validator", validator)
bound_setattr("repr", repr)
bound_setattr("cmp", cmp)
bound_setattr("hash", hash)
bound_setattr("init", init)
bound_setattr("converter", converter)
bound_setattr("metadata", (
metadata_proxy(metadata) if metadata
else _empty_metadata_singleton
))
bound_setattr("type", type)
def __setattr__(self, name, value):
raise FrozenInstanceError()
@property
def convert(self):
warnings.warn(
"The `convert` attribute is deprecated in favor of `converter`. "
"It will be removed after 2019/01.",
DeprecationWarning, stacklevel=2,
)
return self.converter
@classmethod
def from_counting_attr(cls, name, ca, type=None):
# type holds the annotated value. deal with conflicts:
if type is None:
type = ca.type
elif ca.type is not None:
raise ValueError(
"Type annotation and type argument cannot both be present"
)
inst_dict = {
k: getattr(ca, k)
for k
in Attribute.__slots__
if k not in (
"name", "validator", "default", "type", "convert",
) # exclude methods and deprecated alias
}
return cls(
name=name, validator=ca._validator, default=ca._default, type=type,
**inst_dict
)
# Don't use _add_pickle since fields(Attribute) doesn't work
def __getstate__(self):
"""
Play nice with pickle.
"""
return tuple(getattr(self, name) if name != "metadata"
else dict(self.metadata)
for name in self.__slots__)
def __setstate__(self, state):
"""
Play nice with pickle.
"""
bound_setattr = _obj_setattr.__get__(self, Attribute)
for name, value in zip(self.__slots__, state):
if name != "metadata":
bound_setattr(name, value)
else:
bound_setattr(name, metadata_proxy(value) if value else
_empty_metadata_singleton)
_a = [
Attribute(name=name, default=NOTHING, validator=None,
repr=True, cmp=True, hash=(name != "metadata"), init=True)
for name in Attribute.__slots__
if name != "convert" # XXX: remove once `convert` is gone
]
Attribute = _add_hash(
_add_cmp(_add_repr(Attribute, attrs=_a), attrs=_a),
attrs=[a for a in _a if a.hash]
)
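# Editorial usage sketch (not part of the original source): Attribute instances are
# frozen via the __setattr__ override above, so mutating one raises
# FrozenInstanceError. ``C`` is a hypothetical example class.
def _example_attribute_is_read_only():
    @attrs
    class C(object):
        x = attrib()
    a = fields(C)[0]
    try:
        a.name = "renamed"
    except FrozenInstanceError:
        pass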
class _CountingAttr(object):
"""
Intermediate representation of attributes that uses a counter to preserve
the order in which the attributes have been defined.
    *Internal* data structure of the attrs library. Running into it is most
    likely the result of a bug like a forgotten `@attr.s` decorator.
"""
__slots__ = ("counter", "_default", "repr", "cmp", "hash", "init",
"metadata", "_validator", "converter", "type")
__attrs_attrs__ = tuple(
Attribute(name=name, default=NOTHING, validator=None,
repr=True, cmp=True, hash=True, init=True)
for name
in ("counter", "_default", "repr", "cmp", "hash", "init",)
) + (
Attribute(name="metadata", default=None, validator=None,
repr=True, cmp=True, hash=False, init=True),
)
cls_counter = 0
def __init__(self, default, validator, repr, cmp, hash, init, converter,
metadata, type):
_CountingAttr.cls_counter += 1
self.counter = _CountingAttr.cls_counter
self._default = default
        # If validator is a list/tuple, wrap it with the and_ helper validator.
if validator and isinstance(validator, (list, tuple)):
self._validator = and_(*validator)
else:
self._validator = validator
self.repr = repr
self.cmp = cmp
self.hash = hash
self.init = init
self.converter = converter
self.metadata = metadata
self.type = type
def validator(self, meth):
"""
Decorator that adds *meth* to the list of validators.
Returns *meth* unchanged.
.. versionadded:: 17.1.0
"""
if self._validator is None:
self._validator = meth
else:
self._validator = and_(self._validator, meth)
return meth
def default(self, meth):
"""
        Decorator that allows setting the default for an attribute.
Returns *meth* unchanged.
:raises DefaultAlreadySetError: If default has been set before.
.. versionadded:: 17.1.0
"""
if self._default is not NOTHING:
raise DefaultAlreadySetError()
self._default = Factory(meth, takes_self=True)
return meth
_CountingAttr = _add_cmp(_add_repr(_CountingAttr))
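# Editorial usage sketch (illustration only): the validator()/default() decorators on
# _CountingAttr above let a class add a validator and a Factory-wrapped default after
# declaring the attribute. The class and method names are hypothetical.
def _example_decorator_based_default_and_validator():
    @attrs
    class C(object):
        x = attrib()
        @x.validator
        def _check_x(self, attribute, value):
            if value < 0:
                raise ValueError("x must be >= 0")
        @x.default
        def _default_x(self):
            return 0
    assert C().x == 0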
@attrs(slots=True, init=False, hash=True)
class Factory(object):
"""
Stores a factory callable.
If passed as the default value to :func:`attr.ib`, the factory is used to
generate a new value.
:param callable factory: A callable that takes either none or exactly one
mandatory positional argument depending on *takes_self*.
:param bool takes_self: Pass the partially initialized instance that is
being initialized as a positional argument.
.. versionadded:: 17.1.0 *takes_self*
"""
factory = attrib()
takes_self = attrib()
def __init__(self, factory, takes_self=False):
"""
`Factory` is part of the default machinery so if we want a default
value here, we have to implement it ourselves.
"""
self.factory = factory
self.takes_self = takes_self
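# Editorial usage sketch (not part of the original source): with takes_self=True the
# partially initialized instance is passed to the factory, so later defaults can be
# derived from earlier ones. ``C`` is a hypothetical example class.
def _example_factory_takes_self():
    @attrs
    class C(object):
        x = attrib(default=42)
        y = attrib(default=Factory(lambda self: self.x + 1, takes_self=True))
    assert C().y == 43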
def make_class(name, attrs, bases=(object,), **attributes_arguments):
"""
A quick way to create a new class called *name* with *attrs*.
:param name: The name for the new class.
:type name: str
    :param attrs: A list of names or a dictionary mapping names to
        attributes.
If *attrs* is a list or an ordered dict (:class:`dict` on Python 3.6+,
:class:`collections.OrderedDict` otherwise), the order is deduced from
the order of the names or attributes inside *attrs*. Otherwise the
order of the definition of the attributes is used.
:type attrs: :class:`list` or :class:`dict`
:param tuple bases: Classes that the new class will subclass.
:param attributes_arguments: Passed unmodified to :func:`attr.s`.
:return: A new class with *attrs*.
:rtype: type
.. versionadded:: 17.1.0 *bases*
.. versionchanged:: 18.1.0 If *attrs* is ordered, the order is retained.
"""
if isinstance(attrs, dict):
cls_dict = attrs
elif isinstance(attrs, (list, tuple)):
cls_dict = dict((a, attrib()) for a in attrs)
else:
raise TypeError("attrs argument must be a dict or a list.")
post_init = cls_dict.pop("__attrs_post_init__", None)
type_ = type(
name,
bases,
{} if post_init is None else {"__attrs_post_init__": post_init}
)
# For pickling to work, the __module__ variable needs to be set to the
# frame where the class is created. Bypass this step in environments where
# sys._getframe is not defined (Jython for example) or sys._getframe is not
# defined for arguments greater than 0 (IronPython).
try:
type_.__module__ = sys._getframe(1).f_globals.get(
"__name__", "__main__",
)
except (AttributeError, ValueError):
pass
return _attrs(these=cls_dict, **attributes_arguments)(type_)
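# Editorial usage sketch (illustration only): make_class() accepts either a list of
# names or a dict of name -> attr.ib mappings and forwards extra keyword arguments to
# the attrs() decorator. The class names are hypothetical.
def _example_make_class_usage():
    C1 = make_class("C1", ["x", "y"])
    C2 = make_class("C2", {"x": attrib(default=0)}, frozen=True)
    assert C1(1, 2).y == 2
    assert C2().x == 0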
# These are required within this module, so we define them here and merely
# import them into .validators.
@attrs(slots=True, hash=True)
class _AndValidator(object):
"""
Compose many validators to a single one.
"""
_validators = attrib()
def __call__(self, inst, attr, value):
for v in self._validators:
v(inst, attr, value)
def and_(*validators):
"""
A validator that composes multiple validators into one.
When called on a value, it runs all wrapped validators.
:param validators: Arbitrary number of validators.
:type validators: callables
.. versionadded:: 17.1.0
"""
vals = []
for validator in validators:
vals.extend(
validator._validators if isinstance(validator, _AndValidator)
else [validator]
)
return _AndValidator(tuple(vals))
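# Editorial usage sketch (not part of the original source): and_() flattens nested
# _AndValidator instances and runs every wrapped validator in order; passing a list
# to ``validator=`` is wrapped with and_() by _CountingAttr.__init__. The validators
# below are hypothetical.
def _example_and_usage():
    def _is_int(inst, attribute, value):
        if not isinstance(value, int):
            raise TypeError("%s must be an int" % (attribute.name,))
    def _is_positive(inst, attribute, value):
        if value <= 0:
            raise ValueError("%s must be positive" % (attribute.name,))
    @attrs
    class C(object):
        x = attrib(validator=and_(_is_int, _is_positive))
    C(1)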
| {
"repo_name": "larsbergstrom/servo",
"path": "tests/wpt/web-platform-tests/tools/third_party/attrs/src/attr/_make.py",
"copies": "41",
"size": "55996",
"license": "mpl-2.0",
"hash": 7017228721665356000,
"line_mean": 32.1533451747,
"line_max": 79,
"alpha_frac": 0.556932638,
"autogenerated": false,
"ratio": 4.107085228106205,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import hashlib
import linecache
import sys
import warnings
from operator import itemgetter
from . import _config
from ._compat import PY2, isclass, iteritems, metadata_proxy, set_closure_cell
from .exceptions import (
DefaultAlreadySetError, FrozenInstanceError, NotAnAttrsClassError,
UnannotatedAttributeError
)
# This is used at least twice, so cache it here.
_obj_setattr = object.__setattr__
_init_converter_pat = "__attr_converter_{}"
_init_factory_pat = "__attr_factory_{}"
_tuple_property_pat = " {attr_name} = property(itemgetter({index}))"
_empty_metadata_singleton = metadata_proxy({})
class _Nothing(object):
"""
Sentinel class to indicate the lack of a value when ``None`` is ambiguous.
All instances of `_Nothing` are equal.
"""
def __copy__(self):
return self
def __deepcopy__(self, _):
return self
def __eq__(self, other):
return other.__class__ == _Nothing
def __ne__(self, other):
return not self == other
def __repr__(self):
return "NOTHING"
def __hash__(self):
return 0xdeadbeef
NOTHING = _Nothing()
"""
Sentinel to indicate the lack of a value when ``None`` is ambiguous.
"""
def attrib(default=NOTHING, validator=None,
repr=True, cmp=True, hash=None, init=True,
convert=None, metadata=None, type=None, converter=None):
"""
Create a new attribute on a class.
.. warning::
Does *not* do anything unless the class is also decorated with
:func:`attr.s`!
:param default: A value that is used if an ``attrs``-generated ``__init__``
is used and no value is passed while instantiating or the attribute is
excluded using ``init=False``.
If the value is an instance of :class:`Factory`, its callable will be
used to construct a new value (useful for mutable data types like lists
or dicts).
If a default is not set (or set manually to ``attr.NOTHING``), a value
*must* be supplied when instantiating; otherwise a :exc:`TypeError`
will be raised.
The default can also be set using decorator notation as shown below.
:type default: Any value.
:param validator: :func:`callable` that is called by ``attrs``-generated
``__init__`` methods after the instance has been initialized. They
receive the initialized instance, the :class:`Attribute`, and the
passed value.
The return value is *not* inspected so the validator has to throw an
exception itself.
If a ``list`` is passed, its items are treated as validators and must
all pass.
Validators can be globally disabled and re-enabled using
:func:`get_run_validators`.
The validator can also be set using decorator notation as shown below.
:type validator: ``callable`` or a ``list`` of ``callable``\ s.
:param bool repr: Include this attribute in the generated ``__repr__``
method.
:param bool cmp: Include this attribute in the generated comparison methods
(``__eq__`` et al).
:param hash: Include this attribute in the generated ``__hash__``
method. If ``None`` (default), mirror *cmp*'s value. This is the
        correct behavior according to the Python spec. Setting this value to
anything else than ``None`` is *discouraged*.
:type hash: ``bool`` or ``None``
:param bool init: Include this attribute in the generated ``__init__``
method. It is possible to set this to ``False`` and set a default
        value. In that case this attribute is unconditionally initialized
with the specified default value or factory.
:param callable converter: :func:`callable` that is called by
        ``attrs``-generated ``__init__`` methods to convert the attribute's value
to the desired format. It is given the passed-in value, and the
returned value will be used as the new value of the attribute. The
value is converted before being passed to the validator, if any.
:param metadata: An arbitrary mapping, to be used by third-party
components. See :ref:`extending_metadata`.
:param type: The type of the attribute. In Python 3.6 or greater, the
preferred method to specify the type is using a variable annotation
(see `PEP 526 <https://www.python.org/dev/peps/pep-0526/>`_).
This argument is provided for backward compatibility.
Regardless of the approach used, the type will be stored on
``Attribute.type``.
.. versionadded:: 15.2.0 *convert*
.. versionadded:: 16.3.0 *metadata*
.. versionchanged:: 17.1.0 *validator* can be a ``list`` now.
.. versionchanged:: 17.1.0
*hash* is ``None`` and therefore mirrors *cmp* by default.
.. versionadded:: 17.3.0 *type*
.. deprecated:: 17.4.0 *convert*
.. versionadded:: 17.4.0 *converter* as a replacement for the deprecated
*convert* to achieve consistency with other noun-based arguments.
"""
if hash is not None and hash is not True and hash is not False:
raise TypeError(
"Invalid value for hash. Must be True, False, or None."
)
if convert is not None:
if converter is not None:
raise RuntimeError(
"Can't pass both `convert` and `converter`. "
"Please use `converter` only."
)
warnings.warn(
"The `convert` argument is deprecated in favor of `converter`. "
"It will be removed after 2019/01.",
DeprecationWarning, stacklevel=2
)
converter = convert
if metadata is None:
metadata = {}
return _CountingAttr(
default=default,
validator=validator,
repr=repr,
cmp=cmp,
hash=hash,
init=init,
converter=converter,
metadata=metadata,
type=type,
)
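# Editorial usage sketch (added for illustration; not part of the original attrs
# source): the deprecated ``convert`` argument handled above is mapped onto
# ``converter`` and emits a DeprecationWarning.
def _example_attrib_convert_deprecation():
    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        a = attrib(convert=int)
    assert a.converter is int
    assert issubclass(caught[-1].category, DeprecationWarning)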
def _make_attr_tuple_class(cls_name, attr_names):
"""
Create a tuple subclass to hold `Attribute`s for an `attrs` class.
The subclass is a bare tuple with properties for names.
class MyClassAttributes(tuple):
__slots__ = ()
x = property(itemgetter(0))
"""
attr_class_name = "{}Attributes".format(cls_name)
attr_class_template = [
"class {}(tuple):".format(attr_class_name),
" __slots__ = ()",
]
if attr_names:
for i, attr_name in enumerate(attr_names):
attr_class_template.append(_tuple_property_pat.format(
index=i,
attr_name=attr_name,
))
else:
attr_class_template.append(" pass")
globs = {"itemgetter": itemgetter}
eval(compile("\n".join(attr_class_template), "", "exec"), globs)
return globs[attr_class_name]
# Tuple class for extracted attributes from a class definition.
# `super_attrs` is a subset of `attrs`.
_Attributes = _make_attr_tuple_class("_Attributes", [
"attrs", # all attributes to build dunder methods for
"super_attrs", # attributes that have been inherited from super classes
])
def _is_class_var(annot):
"""
Check whether *annot* is a typing.ClassVar.
The implementation is gross but importing `typing` is slow and there are
    discussions to remove it from the stdlib altogether.
"""
return str(annot).startswith("typing.ClassVar")
def _get_annotations(cls):
"""
Get annotations for *cls*.
"""
anns = getattr(cls, "__annotations__", None)
if anns is None:
return {}
# Verify that the annotations aren't merely inherited.
for super_cls in cls.__mro__[1:]:
if anns is getattr(super_cls, "__annotations__", None):
return {}
return anns
def _transform_attrs(cls, these, auto_attribs):
"""
Transform all `_CountingAttr`s on a class into `Attribute`s.
If *these* is passed, use that and don't look for them on the class.
Return an `_Attributes`.
"""
cd = cls.__dict__
anns = _get_annotations(cls)
if these is not None:
ca_list = sorted((
(name, ca)
for name, ca
in iteritems(these)
), key=lambda e: e[1].counter)
elif auto_attribs is True:
ca_names = {
name
for name, attr
in cd.items()
if isinstance(attr, _CountingAttr)
}
ca_list = []
annot_names = set()
for attr_name, type in anns.items():
if _is_class_var(type):
continue
annot_names.add(attr_name)
a = cd.get(attr_name, NOTHING)
if not isinstance(a, _CountingAttr):
if a is NOTHING:
a = attrib()
else:
a = attrib(default=a)
ca_list.append((attr_name, a))
unannotated = ca_names - annot_names
if len(unannotated) > 0:
raise UnannotatedAttributeError(
"The following `attr.ib`s lack a type annotation: " +
", ".join(sorted(
unannotated,
key=lambda n: cd.get(n).counter
)) + "."
)
else:
ca_list = sorted((
(name, attr)
for name, attr
in cd.items()
if isinstance(attr, _CountingAttr)
), key=lambda e: e[1].counter)
own_attrs = [
Attribute.from_counting_attr(
name=attr_name,
ca=ca,
type=anns.get(attr_name),
)
for attr_name, ca
in ca_list
]
super_attrs = []
taken_attr_names = {a.name: a for a in own_attrs}
# Traverse the MRO and collect attributes.
for super_cls in cls.__mro__[1:-1]:
sub_attrs = getattr(super_cls, "__attrs_attrs__", None)
if sub_attrs is not None:
for a in sub_attrs:
prev_a = taken_attr_names.get(a.name)
# Only add an attribute if it hasn't been defined before. This
# allows for overwriting attribute definitions by subclassing.
if prev_a is None:
super_attrs.append(a)
taken_attr_names[a.name] = a
attr_names = [a.name for a in super_attrs + own_attrs]
AttrsClass = _make_attr_tuple_class(cls.__name__, attr_names)
attrs = AttrsClass(
super_attrs + [
Attribute.from_counting_attr(
name=attr_name,
ca=ca,
type=anns.get(attr_name)
)
for attr_name, ca
in ca_list
]
)
had_default = False
for a in attrs:
if had_default is True and a.default is NOTHING and a.init is True:
raise ValueError(
"No mandatory attributes allowed after an attribute with a "
"default value or factory. Attribute in question: {a!r}"
.format(a=a)
)
elif had_default is False and \
a.default is not NOTHING and \
a.init is not False:
had_default = True
return _Attributes((attrs, super_attrs))
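# Editorial usage sketch (illustration only): the had_default check at the end of
# _transform_attrs() rejects a mandatory attribute that follows one with a default,
# because the generated __init__ could not be given a valid signature. ``Broken`` is
# a hypothetical example class; attrs/attrib refer to the decorators in this module.
def _example_default_ordering_rule():
    try:
        @attrs
        class Broken(object):
            x = attrib(default=1)
            y = attrib()            # mandatory after a default -> ValueError
    except ValueError:
        pass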
def _frozen_setattrs(self, name, value):
"""
Attached to frozen classes as __setattr__.
"""
raise FrozenInstanceError()
def _frozen_delattrs(self, name):
"""
Attached to frozen classes as __delattr__.
"""
raise FrozenInstanceError()
class _ClassBuilder(object):
"""
Iteratively build *one* class.
"""
__slots__ = (
"_cls", "_cls_dict", "_attrs", "_super_names", "_attr_names", "_slots",
"_frozen", "_has_post_init",
)
def __init__(self, cls, these, slots, frozen, auto_attribs):
attrs, super_attrs = _transform_attrs(cls, these, auto_attribs)
self._cls = cls
self._cls_dict = dict(cls.__dict__) if slots else {}
self._attrs = attrs
self._super_names = set(a.name for a in super_attrs)
self._attr_names = tuple(a.name for a in attrs)
self._slots = slots
self._frozen = frozen or _has_frozen_superclass(cls)
self._has_post_init = bool(getattr(cls, "__attrs_post_init__", False))
self._cls_dict["__attrs_attrs__"] = self._attrs
if frozen:
self._cls_dict["__setattr__"] = _frozen_setattrs
self._cls_dict["__delattr__"] = _frozen_delattrs
def __repr__(self):
return "<_ClassBuilder(cls={cls})>".format(cls=self._cls.__name__)
def build_class(self):
"""
Finalize class based on the accumulated configuration.
Builder cannot be used anymore after calling this method.
"""
if self._slots is True:
return self._create_slots_class()
else:
return self._patch_original_class()
def _patch_original_class(self):
"""
Apply accumulated methods and return the class.
"""
cls = self._cls
super_names = self._super_names
# Clean class of attribute definitions (`attr.ib()`s).
for name in self._attr_names:
if name not in super_names and \
getattr(cls, name, None) is not None:
delattr(cls, name)
# Attach our dunder methods.
for name, value in self._cls_dict.items():
setattr(cls, name, value)
return cls
def _create_slots_class(self):
"""
Build and return a new class with a `__slots__` attribute.
"""
super_names = self._super_names
cd = {
k: v
for k, v in iteritems(self._cls_dict)
if k not in tuple(self._attr_names) + ("__dict__",)
}
# We only add the names of attributes that aren't inherited.
        # Setting __slots__ to inherited attributes wastes memory.
cd["__slots__"] = tuple(
name
for name in self._attr_names
if name not in super_names
)
qualname = getattr(self._cls, "__qualname__", None)
if qualname is not None:
cd["__qualname__"] = qualname
attr_names = tuple(self._attr_names)
def slots_getstate(self):
"""
Automatically created by attrs.
"""
return tuple(getattr(self, name) for name in attr_names)
def slots_setstate(self, state):
"""
Automatically created by attrs.
"""
__bound_setattr = _obj_setattr.__get__(self, Attribute)
for name, value in zip(attr_names, state):
__bound_setattr(name, value)
# slots and frozen require __getstate__/__setstate__ to work
cd["__getstate__"] = slots_getstate
cd["__setstate__"] = slots_setstate
# Create new class based on old class and our methods.
cls = type(self._cls)(
self._cls.__name__,
self._cls.__bases__,
cd,
)
# The following is a fix for
# https://github.com/python-attrs/attrs/issues/102. On Python 3,
# if a method mentions `__class__` or uses the no-arg super(), the
# compiler will bake a reference to the class in the method itself
# as `method.__closure__`. Since we replace the class with a
# clone, we rewrite these references so it keeps working.
for item in cls.__dict__.values():
if isinstance(item, (classmethod, staticmethod)):
# Class- and staticmethods hide their functions inside.
# These might need to be rewritten as well.
closure_cells = getattr(item.__func__, "__closure__", None)
else:
closure_cells = getattr(item, "__closure__", None)
if not closure_cells: # Catch None or the empty list.
continue
for cell in closure_cells:
if cell.cell_contents is self._cls:
set_closure_cell(cell, cls)
return cls
def add_repr(self, ns):
self._cls_dict["__repr__"] = self._add_method_dunders(
_make_repr(self._attrs, ns=ns)
)
return self
def add_str(self):
repr = self._cls_dict.get("__repr__")
if repr is None:
raise ValueError(
"__str__ can only be generated if a __repr__ exists."
)
def __str__(self):
return self.__repr__()
self._cls_dict["__str__"] = self._add_method_dunders(__str__)
return self
def make_unhashable(self):
self._cls_dict["__hash__"] = None
return self
def add_hash(self):
self._cls_dict["__hash__"] = self._add_method_dunders(
_make_hash(self._attrs)
)
return self
def add_init(self):
self._cls_dict["__init__"] = self._add_method_dunders(
_make_init(
self._attrs,
self._has_post_init,
self._frozen,
)
)
return self
def add_cmp(self):
cd = self._cls_dict
cd["__eq__"], cd["__ne__"], cd["__lt__"], cd["__le__"], cd["__gt__"], \
cd["__ge__"] = (
self._add_method_dunders(meth)
for meth in _make_cmp(self._attrs)
)
return self
def _add_method_dunders(self, method):
"""
Add __module__ and __qualname__ to a *method* if possible.
"""
try:
method.__module__ = self._cls.__module__
except AttributeError:
pass
try:
method.__qualname__ = ".".join(
(self._cls.__qualname__, method.__name__,)
)
except AttributeError:
pass
return method
def attrs(maybe_cls=None, these=None, repr_ns=None,
repr=True, cmp=True, hash=None, init=True,
slots=False, frozen=False, str=False, auto_attribs=False):
r"""
A class decorator that adds `dunder
<https://wiki.python.org/moin/DunderAlias>`_\ -methods according to the
specified attributes using :func:`attr.ib` or the *these* argument.
:param these: A dictionary of name to :func:`attr.ib` mappings. This is
useful to avoid the definition of your attributes within the class body
because you can't (e.g. if you want to add ``__repr__`` methods to
Django models) or don't want to.
If *these* is not ``None``, ``attrs`` will *not* search the class body
for attributes.
:type these: :class:`dict` of :class:`str` to :func:`attr.ib`
:param str repr_ns: When using nested classes, there's no way in Python 2
to automatically detect that. Therefore it's possible to set the
namespace explicitly for a more meaningful ``repr`` output.
    :param bool repr: Create a ``__repr__`` method with a human-readable
        representation of ``attrs`` attributes.
:param bool str: Create a ``__str__`` method that is identical to
``__repr__``. This is usually not necessary except for
:class:`Exception`\ s.
:param bool cmp: Create ``__eq__``, ``__ne__``, ``__lt__``, ``__le__``,
``__gt__``, and ``__ge__`` methods that compare the class as if it were
a tuple of its ``attrs`` attributes. But the attributes are *only*
        compared if the type of both classes is *identical*!
:param hash: If ``None`` (default), the ``__hash__`` method is generated
        according to how *cmp* and *frozen* are set.
1. If *both* are True, ``attrs`` will generate a ``__hash__`` for you.
2. If *cmp* is True and *frozen* is False, ``__hash__`` will be set to
None, marking it unhashable (which it is).
3. If *cmp* is False, ``__hash__`` will be left untouched meaning the
``__hash__`` method of the superclass will be used (if superclass is
           ``object``, this means it will fall back to id-based hashing).
        Although not recommended, you can decide for yourself and force
        ``attrs`` to create one (e.g. if the class is immutable even though you
        didn't freeze it programmatically) by passing ``True``, or keep
        ``attrs`` from touching ``__hash__`` at all by passing ``False``. Both
        of these cases are rather special and should be used carefully.
See the `Python documentation \
<https://docs.python.org/3/reference/datamodel.html#object.__hash__>`_
and the `GitHub issue that led to the default behavior \
<https://github.com/python-attrs/attrs/issues/136>`_ for more details.
:type hash: ``bool`` or ``None``
:param bool init: Create a ``__init__`` method that initializes the
``attrs`` attributes. Leading underscores are stripped for the
argument name. If a ``__attrs_post_init__`` method exists on the
class, it will be called after the class is fully initialized.
:param bool slots: Create a slots_-style class that's more
memory-efficient. See :ref:`slots` for further ramifications.
:param bool frozen: Make instances immutable after initialization. If
someone attempts to modify a frozen instance,
:exc:`attr.exceptions.FrozenInstanceError` is raised.
Please note:
1. This is achieved by installing a custom ``__setattr__`` method
           on your class, so you can't implement your own.
2. True immutability is impossible in Python.
        3. This *does* have a minor runtime performance :ref:`impact
<how-frozen>` when initializing new instances. In other words:
``__init__`` is slightly slower with ``frozen=True``.
4. If a class is frozen, you cannot modify ``self`` in
``__attrs_post_init__`` or a self-written ``__init__``. You can
circumvent that limitation by using
``object.__setattr__(self, "attribute_name", value)``.
.. _slots: https://docs.python.org/3/reference/datamodel.html#slots
:param bool auto_attribs: If True, collect `PEP 526`_-annotated attributes
(Python 3.6 and later only) from the class body.
In this case, you **must** annotate every field. If ``attrs``
encounters a field that is set to an :func:`attr.ib` but lacks a type
annotation, an :exc:`attr.exceptions.UnannotatedAttributeError` is
raised. Use ``field_name: typing.Any = attr.ib(...)`` if you don't
want to set a type.
If you assign a value to those attributes (e.g. ``x: int = 42``), that
        value becomes the default value as if it were passed using
``attr.ib(default=42)``. Passing an instance of :class:`Factory` also
works as expected.
Attributes annotated as :data:`typing.ClassVar` are **ignored**.
.. _`PEP 526`: https://www.python.org/dev/peps/pep-0526/
.. versionadded:: 16.0.0 *slots*
.. versionadded:: 16.1.0 *frozen*
.. versionadded:: 16.3.0 *str*, and support for ``__attrs_post_init__``.
.. versionchanged::
17.1.0 *hash* supports ``None`` as value which is also the default
now.
.. versionadded:: 17.3.0 *auto_attribs*
"""
def wrap(cls):
if getattr(cls, "__class__", None) is None:
raise TypeError("attrs only works with new-style classes.")
builder = _ClassBuilder(cls, these, slots, frozen, auto_attribs)
if repr is True:
builder.add_repr(repr_ns)
if str is True:
builder.add_str()
if cmp is True:
builder.add_cmp()
if hash is not True and hash is not False and hash is not None:
# Can't use `hash in` because 1 == True for example.
raise TypeError(
"Invalid value for hash. Must be True, False, or None."
)
elif hash is False or (hash is None and cmp is False):
pass
elif hash is True or (hash is None and cmp is True and frozen is True):
builder.add_hash()
else:
builder.make_unhashable()
if init is True:
builder.add_init()
return builder.build_class()
# maybe_cls's type depends on the usage of the decorator. It's a class
# if it's used as `@attrs` but ``None`` if used as `@attrs()`.
if maybe_cls is None:
return wrap
else:
return wrap(maybe_cls)
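# Editorial usage sketch (not part of the original source): frozen classes get
# _frozen_setattrs installed as __setattr__, so normal assignment raises
# FrozenInstanceError; object.__setattr__ remains the documented escape hatch.
def _example_frozen_behavior():
    @attrs(frozen=True)
    class C(object):
        x = attrib()
    c = C(1)
    try:
        c.x = 2
    except FrozenInstanceError:
        pass
    object.__setattr__(c, "x", 2)
    assert c.x == 2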
_attrs = attrs
"""
Internal alias so we can use it in functions that take an argument called
*attrs*.
"""
if PY2:
def _has_frozen_superclass(cls):
"""
Check whether *cls* has a frozen ancestor by looking at its
__setattr__.
"""
return (
getattr(
cls.__setattr__, "__module__", None
) == _frozen_setattrs.__module__ and
cls.__setattr__.__name__ == _frozen_setattrs.__name__
)
else:
def _has_frozen_superclass(cls):
"""
Check whether *cls* has a frozen ancestor by looking at its
__setattr__.
"""
return cls.__setattr__ == _frozen_setattrs
def _attrs_to_tuple(obj, attrs):
"""
Create a tuple of all values of *obj*'s *attrs*.
"""
return tuple(getattr(obj, a.name) for a in attrs)
def _make_hash(attrs):
attrs = tuple(
a
for a in attrs
if a.hash is True or (a.hash is None and a.cmp is True)
)
# We cache the generated hash methods for the same kinds of attributes.
sha1 = hashlib.sha1()
sha1.update(repr(attrs).encode("utf-8"))
unique_filename = "<attrs generated hash %s>" % (sha1.hexdigest(),)
type_hash = hash(unique_filename)
lines = [
"def __hash__(self):",
" return hash((",
" %d," % (type_hash,),
]
for a in attrs:
lines.append(" self.%s," % (a.name))
lines.append(" ))")
script = "\n".join(lines)
globs = {}
locs = {}
bytecode = compile(script, unique_filename, "exec")
eval(bytecode, globs, locs)
    # In order for debuggers like PDB to be able to step through the code,
# we add a fake linecache entry.
linecache.cache[unique_filename] = (
len(script),
None,
script.splitlines(True),
unique_filename,
)
return locs["__hash__"]
def _add_hash(cls, attrs):
"""
Add a hash method to *cls*.
"""
cls.__hash__ = _make_hash(attrs)
return cls
def __ne__(self, other):
"""
Check equality and either forward a NotImplemented or return the result
negated.
"""
result = self.__eq__(other)
if result is NotImplemented:
return NotImplemented
return not result
def _make_cmp(attrs):
attrs = [a for a in attrs if a.cmp]
# We cache the generated eq methods for the same kinds of attributes.
sha1 = hashlib.sha1()
sha1.update(repr(attrs).encode("utf-8"))
unique_filename = "<attrs generated eq %s>" % (sha1.hexdigest(),)
lines = [
"def __eq__(self, other):",
" if other.__class__ is not self.__class__:",
" return NotImplemented",
]
# We can't just do a big self.x = other.x and... clause due to
# irregularities like nan == nan is false but (nan,) == (nan,) is true.
if attrs:
lines.append(" return (")
others = [
" ) == (",
]
for a in attrs:
lines.append(" self.%s," % (a.name,))
others.append(" other.%s," % (a.name,))
lines += others + [" )"]
else:
lines.append(" return True")
script = "\n".join(lines)
globs = {}
locs = {}
bytecode = compile(script, unique_filename, "exec")
eval(bytecode, globs, locs)
    # In order for debuggers like PDB to be able to step through the code,
# we add a fake linecache entry.
linecache.cache[unique_filename] = (
len(script),
None,
script.splitlines(True),
unique_filename,
)
eq = locs["__eq__"]
ne = __ne__
def attrs_to_tuple(obj):
"""
Save us some typing.
"""
return _attrs_to_tuple(obj, attrs)
def __lt__(self, other):
"""
Automatically created by attrs.
"""
if isinstance(other, self.__class__):
return attrs_to_tuple(self) < attrs_to_tuple(other)
else:
return NotImplemented
def __le__(self, other):
"""
Automatically created by attrs.
"""
if isinstance(other, self.__class__):
return attrs_to_tuple(self) <= attrs_to_tuple(other)
else:
return NotImplemented
def __gt__(self, other):
"""
Automatically created by attrs.
"""
if isinstance(other, self.__class__):
return attrs_to_tuple(self) > attrs_to_tuple(other)
else:
return NotImplemented
def __ge__(self, other):
"""
Automatically created by attrs.
"""
if isinstance(other, self.__class__):
return attrs_to_tuple(self) >= attrs_to_tuple(other)
else:
return NotImplemented
return eq, ne, __lt__, __le__, __gt__, __ge__
def _add_cmp(cls, attrs=None):
"""
Add comparison methods to *cls*.
"""
if attrs is None:
attrs = cls.__attrs_attrs__
cls.__eq__, cls.__ne__, cls.__lt__, cls.__le__, cls.__gt__, cls.__ge__ = \
_make_cmp(attrs)
return cls
def _make_repr(attrs, ns):
"""
    Make a repr method for *attrs*, adding *ns* to the full name.
"""
attr_names = tuple(
a.name
for a in attrs
if a.repr
)
def __repr__(self):
"""
Automatically created by attrs.
"""
real_cls = self.__class__
if ns is None:
qualname = getattr(real_cls, "__qualname__", None)
if qualname is not None:
class_name = qualname.rsplit(">.", 1)[-1]
else:
class_name = real_cls.__name__
else:
class_name = ns + "." + real_cls.__name__
return "{0}({1})".format(
class_name,
", ".join(
name + "=" + repr(getattr(self, name, NOTHING))
for name in attr_names
)
)
return __repr__
def _add_repr(cls, ns=None, attrs=None):
"""
Add a repr method to *cls*.
"""
if attrs is None:
attrs = cls.__attrs_attrs__
cls.__repr__ = _make_repr(attrs, ns)
return cls
def _make_init(attrs, post_init, frozen):
attrs = [
a
for a in attrs
if a.init or a.default is not NOTHING
]
# We cache the generated init methods for the same kinds of attributes.
sha1 = hashlib.sha1()
sha1.update(repr(attrs).encode("utf-8"))
unique_filename = "<attrs generated init {0}>".format(
sha1.hexdigest()
)
script, globs = _attrs_to_init_script(
attrs,
frozen,
post_init,
)
locs = {}
bytecode = compile(script, unique_filename, "exec")
attr_dict = dict((a.name, a) for a in attrs)
globs.update({
"NOTHING": NOTHING,
"attr_dict": attr_dict,
})
if frozen is True:
# Save the lookup overhead in __init__ if we need to circumvent
# immutability.
globs["_cached_setattr"] = _obj_setattr
eval(bytecode, globs, locs)
    # In order for debuggers like PDB to be able to step through the code,
# we add a fake linecache entry.
linecache.cache[unique_filename] = (
len(script),
None,
script.splitlines(True),
unique_filename,
)
return locs["__init__"]
def _add_init(cls, frozen):
"""
Add a __init__ method to *cls*. If *frozen* is True, make it immutable.
"""
cls.__init__ = _make_init(
cls.__attrs_attrs__,
getattr(cls, "__attrs_post_init__", False),
frozen,
)
return cls
def fields(cls):
"""
Returns the tuple of ``attrs`` attributes for a class.
The tuple also allows accessing the fields by their names (see below for
examples).
:param type cls: Class to introspect.
:raise TypeError: If *cls* is not a class.
:raise attr.exceptions.NotAnAttrsClassError: If *cls* is not an ``attrs``
class.
:rtype: tuple (with name accessors) of :class:`attr.Attribute`
.. versionchanged:: 16.2.0 Returned tuple allows accessing the fields
by name.
"""
if not isclass(cls):
raise TypeError("Passed object must be a class.")
attrs = getattr(cls, "__attrs_attrs__", None)
if attrs is None:
raise NotAnAttrsClassError(
"{cls!r} is not an attrs-decorated class.".format(cls=cls)
)
return attrs
def validate(inst):
"""
Validate all attributes on *inst* that have a validator.
    Lets all exceptions propagate.
:param inst: Instance of a class with ``attrs`` attributes.
"""
if _config._run_validators is False:
return
for a in fields(inst.__class__):
v = a.validator
if v is not None:
v(inst, a, getattr(inst, a.name))
def _attrs_to_init_script(attrs, frozen, post_init):
"""
Return a script of an initializer for *attrs* and a dict of globals.
The globals are expected by the generated script.
If *frozen* is True, we cannot set the attributes directly so we use
a cached ``object.__setattr__``.
"""
lines = []
if frozen is True:
lines.append(
# Circumvent the __setattr__ descriptor to save one lookup per
# assignment.
"_setattr = _cached_setattr.__get__(self, self.__class__)"
)
def fmt_setter(attr_name, value_var):
return "_setattr('%(attr_name)s', %(value_var)s)" % {
"attr_name": attr_name,
"value_var": value_var,
}
def fmt_setter_with_converter(attr_name, value_var):
conv_name = _init_converter_pat.format(attr_name)
return "_setattr('%(attr_name)s', %(conv)s(%(value_var)s))" % {
"attr_name": attr_name,
"value_var": value_var,
"conv": conv_name,
}
else:
def fmt_setter(attr_name, value):
return "self.%(attr_name)s = %(value)s" % {
"attr_name": attr_name,
"value": value,
}
def fmt_setter_with_converter(attr_name, value_var):
conv_name = _init_converter_pat.format(attr_name)
return "self.%(attr_name)s = %(conv)s(%(value_var)s)" % {
"attr_name": attr_name,
"value_var": value_var,
"conv": conv_name,
}
args = []
attrs_to_validate = []
# This is a dictionary of names to validator and converter callables.
# Injecting this into __init__ globals lets us avoid lookups.
names_for_globals = {}
for a in attrs:
if a.validator:
attrs_to_validate.append(a)
attr_name = a.name
arg_name = a.name.lstrip("_")
has_factory = isinstance(a.default, Factory)
if has_factory and a.default.takes_self:
maybe_self = "self"
else:
maybe_self = ""
if a.init is False:
if has_factory:
init_factory_name = _init_factory_pat.format(a.name)
if a.converter is not None:
lines.append(fmt_setter_with_converter(
attr_name,
init_factory_name + "({0})".format(maybe_self)))
conv_name = _init_converter_pat.format(a.name)
names_for_globals[conv_name] = a.converter
else:
lines.append(fmt_setter(
attr_name,
init_factory_name + "({0})".format(maybe_self)
))
names_for_globals[init_factory_name] = a.default.factory
else:
if a.converter is not None:
lines.append(fmt_setter_with_converter(
attr_name,
"attr_dict['{attr_name}'].default"
.format(attr_name=attr_name)
))
conv_name = _init_converter_pat.format(a.name)
names_for_globals[conv_name] = a.converter
else:
lines.append(fmt_setter(
attr_name,
"attr_dict['{attr_name}'].default"
.format(attr_name=attr_name)
))
elif a.default is not NOTHING and not has_factory:
args.append(
"{arg_name}=attr_dict['{attr_name}'].default".format(
arg_name=arg_name,
attr_name=attr_name,
)
)
if a.converter is not None:
lines.append(fmt_setter_with_converter(attr_name, arg_name))
names_for_globals[_init_converter_pat.format(a.name)] = (
a.converter
)
else:
lines.append(fmt_setter(attr_name, arg_name))
elif has_factory:
args.append("{arg_name}=NOTHING".format(arg_name=arg_name))
lines.append("if {arg_name} is not NOTHING:"
.format(arg_name=arg_name))
init_factory_name = _init_factory_pat.format(a.name)
if a.converter is not None:
lines.append(" " + fmt_setter_with_converter(
attr_name, arg_name
))
lines.append("else:")
lines.append(" " + fmt_setter_with_converter(
attr_name,
init_factory_name + "({0})".format(maybe_self)
))
names_for_globals[_init_converter_pat.format(a.name)] = (
a.converter
)
else:
lines.append(" " + fmt_setter(attr_name, arg_name))
lines.append("else:")
lines.append(" " + fmt_setter(
attr_name,
init_factory_name + "({0})".format(maybe_self)
))
names_for_globals[init_factory_name] = a.default.factory
else:
args.append(arg_name)
if a.converter is not None:
lines.append(fmt_setter_with_converter(attr_name, arg_name))
names_for_globals[_init_converter_pat.format(a.name)] = (
a.converter
)
else:
lines.append(fmt_setter(attr_name, arg_name))
if attrs_to_validate: # we can skip this if there are no validators.
names_for_globals["_config"] = _config
lines.append("if _config._run_validators is True:")
for a in attrs_to_validate:
val_name = "__attr_validator_{}".format(a.name)
attr_name = "__attr_{}".format(a.name)
lines.append(" {}(self, {}, self.{})".format(
val_name, attr_name, a.name))
names_for_globals[val_name] = a.validator
names_for_globals[attr_name] = a
if post_init:
lines.append("self.__attrs_post_init__()")
return """\
def __init__(self, {args}):
{lines}
""".format(
args=", ".join(args),
lines="\n ".join(lines) if lines else "pass",
), names_for_globals
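# --- Editor's illustrative note, not part of the upstream attrs source. ---
# For a class with a single ``x = attr.ib(default=1)`` the script returned
# above looks roughly like this (non-frozen, no validators or converters):
#
#     def __init__(self, x=attr_dict['x'].default):
#         self.x = x
#
# With ``frozen=True`` the body first binds
# ``_setattr = _cached_setattr.__get__(self, self.__class__)`` and then uses
# ``_setattr('x', x)`` to bypass the frozen ``__setattr__``.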
class Attribute(object):
"""
*Read-only* representation of an attribute.
:attribute name: The name of the attribute.
Plus *all* arguments of :func:`attr.ib`.
For the version history of the fields, see :func:`attr.ib`.
"""
__slots__ = (
"name", "default", "validator", "repr", "cmp", "hash", "init",
"metadata", "type", "converter",
)
def __init__(self, name, default, validator, repr, cmp, hash, init,
convert=None, metadata=None, type=None, converter=None):
# Cache this descriptor here to speed things up later.
bound_setattr = _obj_setattr.__get__(self, Attribute)
# Despite the big red warning, people *do* instantiate `Attribute`
# themselves.
if convert is not None:
if converter is not None:
raise RuntimeError(
"Can't pass both `convert` and `converter`. "
"Please use `converter` only."
)
warnings.warn(
"The `convert` argument is deprecated in favor of `converter`."
" It will be removed after 2019/01.",
DeprecationWarning, stacklevel=2
)
converter = convert
bound_setattr("name", name)
bound_setattr("default", default)
bound_setattr("validator", validator)
bound_setattr("repr", repr)
bound_setattr("cmp", cmp)
bound_setattr("hash", hash)
bound_setattr("init", init)
bound_setattr("converter", converter)
bound_setattr("metadata", (
metadata_proxy(metadata) if metadata
else _empty_metadata_singleton
))
bound_setattr("type", type)
def __setattr__(self, name, value):
raise FrozenInstanceError()
@property
def convert(self):
warnings.warn(
"The `convert` attribute is deprecated in favor of `converter`. "
"It will be removed after 2019/01.",
DeprecationWarning, stacklevel=2,
)
return self.converter
@classmethod
def from_counting_attr(cls, name, ca, type=None):
# type holds the annotated value. deal with conflicts:
if type is None:
type = ca.type
elif ca.type is not None:
raise ValueError(
"Type annotation and type argument cannot both be present"
)
inst_dict = {
k: getattr(ca, k)
for k
in Attribute.__slots__
if k not in (
"name", "validator", "default", "type", "convert",
) # exclude methods and deprecated alias
}
return cls(
name=name, validator=ca._validator, default=ca._default, type=type,
**inst_dict
)
# Don't use _add_pickle since fields(Attribute) doesn't work
def __getstate__(self):
"""
Play nice with pickle.
"""
return tuple(getattr(self, name) if name != "metadata"
else dict(self.metadata)
for name in self.__slots__)
def __setstate__(self, state):
"""
Play nice with pickle.
"""
bound_setattr = _obj_setattr.__get__(self, Attribute)
for name, value in zip(self.__slots__, state):
if name != "metadata":
bound_setattr(name, value)
else:
bound_setattr(name, metadata_proxy(value) if value else
_empty_metadata_singleton)
_a = [
Attribute(name=name, default=NOTHING, validator=None,
repr=True, cmp=True, hash=(name != "metadata"), init=True)
for name in Attribute.__slots__
if name != "convert" # XXX: remove once `convert` is gone
]
Attribute = _add_hash(
_add_cmp(_add_repr(Attribute, attrs=_a), attrs=_a),
attrs=[a for a in _a if a.hash]
)
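# --- Editor's illustrative sketch, not part of the upstream attrs source. ---
# ``Attribute`` instances are read-only: introspection data obtained through
# ``attr.fields()`` cannot be mutated afterwards.
def _example_attribute_is_read_only():  # pragma: no cover - sketch only
    import attr
    from attr.exceptions import FrozenInstanceError
    @attr.s
    class C(object):
        x = attr.ib()
    a = attr.fields(C).x
    try:
        a.name = "y"             # any assignment is rejected
    except FrozenInstanceError:
        pass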
class _CountingAttr(object):
"""
Intermediate representation of attributes that uses a counter to preserve
the order in which the attributes have been defined.
*Internal* data structure of the attrs library. Running into it is most
likely the result of a bug, such as a forgotten `@attr.s` decorator.
"""
__slots__ = ("counter", "_default", "repr", "cmp", "hash", "init",
"metadata", "_validator", "converter", "type")
__attrs_attrs__ = tuple(
Attribute(name=name, default=NOTHING, validator=None,
repr=True, cmp=True, hash=True, init=True)
for name
in ("counter", "_default", "repr", "cmp", "hash", "init",)
) + (
Attribute(name="metadata", default=None, validator=None,
repr=True, cmp=True, hash=False, init=True),
)
cls_counter = 0
def __init__(self, default, validator, repr, cmp, hash, init, converter,
metadata, type):
_CountingAttr.cls_counter += 1
self.counter = _CountingAttr.cls_counter
self._default = default
# If validator is a list/tuple, wrap it using helper validator.
if validator and isinstance(validator, (list, tuple)):
self._validator = and_(*validator)
else:
self._validator = validator
self.repr = repr
self.cmp = cmp
self.hash = hash
self.init = init
self.converter = converter
self.metadata = metadata
self.type = type
def validator(self, meth):
"""
Decorator that adds *meth* to the list of validators.
Returns *meth* unchanged.
.. versionadded:: 17.1.0
"""
if self._validator is None:
self._validator = meth
else:
self._validator = and_(self._validator, meth)
return meth
def default(self, meth):
"""
Decorator that allows setting the default for an attribute.
Returns *meth* unchanged.
:raises DefaultAlreadySetError: If default has been set before.
.. versionadded:: 17.1.0
"""
if self._default is not NOTHING:
raise DefaultAlreadySetError()
self._default = Factory(meth, takes_self=True)
return meth
_CountingAttr = _add_cmp(_add_repr(_CountingAttr))
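# --- Editor's illustrative sketch, not part of the upstream attrs source. ---
# The ``default``/``validator`` methods above power the decorator notation on
# the object returned by ``attr.ib()``:
def _example_decorator_notation():  # pragma: no cover - sketch only
    import attr
    @attr.s
    class C(object):
        x = attr.ib()
        @x.default
        def _x_default(self):
            return 42
        @x.validator
        def _x_check(self, attribute, value):
            if value < 0:
                raise ValueError("x must be non-negative")
    assert C().x == 42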
@attrs(slots=True, init=False, hash=True)
class Factory(object):
"""
Stores a factory callable.
If passed as the default value to :func:`attr.ib`, the factory is used to
generate a new value.
:param callable factory: A callable that takes either no arguments or
exactly one mandatory positional argument, depending on *takes_self*.
:param bool takes_self: Pass the partially initialized instance that is
being initialized as a positional argument.
.. versionadded:: 17.1.0 *takes_self*
"""
factory = attrib()
takes_self = attrib()
def __init__(self, factory, takes_self=False):
"""
`Factory` is part of the default machinery so if we want a default
value here, we have to implement it ourselves.
"""
self.factory = factory
self.takes_self = takes_self
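# --- Editor's illustrative sketch, not part of the upstream attrs source. ---
# ``Factory`` defers default construction to instantiation time; with
# ``takes_self=True`` the callable receives the partially built instance.
def _example_factory():  # pragma: no cover - sketch only
    import attr
    @attr.s
    class C(object):
        items = attr.ib(default=attr.Factory(list))
        doubled = attr.ib(
            default=attr.Factory(lambda self: self.items * 2, takes_self=True)
        )
    c = C(items=[1])
    assert c.items == [1] and c.doubled == [1, 1]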
def make_class(name, attrs, bases=(object,), **attributes_arguments):
"""
A quick way to create a new class called *name* with *attrs*.
:param name: The name for the new class.
:type name: str
:param attrs: A list of names or a dictionary mapping names to
attributes.
:type attrs: :class:`list` or :class:`dict`
:param tuple bases: Classes that the new class will subclass.
:param attributes_arguments: Passed unmodified to :func:`attr.s`.
:return: A new class with *attrs*.
:rtype: type
.. versionadded:: 17.1.0 *bases*
"""
if isinstance(attrs, dict):
cls_dict = attrs
elif isinstance(attrs, (list, tuple)):
cls_dict = dict((a, attrib()) for a in attrs)
else:
raise TypeError("attrs argument must be a dict or a list.")
post_init = cls_dict.pop("__attrs_post_init__", None)
type_ = type(
name,
bases,
{} if post_init is None else {"__attrs_post_init__": post_init}
)
# For pickling to work, the __module__ variable needs to be set to the
# frame where the class is created. Bypass this step in environments where
# sys._getframe is not defined (Jython for example) or sys._getframe is not
# defined for arguments greater than 0 (IronPython).
try:
type_.__module__ = sys._getframe(1).f_globals.get(
"__name__", "__main__",
)
except (AttributeError, ValueError):
pass
return _attrs(these=cls_dict, **attributes_arguments)(type_)
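# --- Editor's illustrative sketch, not part of the upstream attrs source. ---
# ``make_class`` builds an attrs class programmatically; both the list and
# the dict form are accepted, and extra keyword arguments go to ``attr.s``.
def _example_make_class():  # pragma: no cover - sketch only
    import attr
    P = attr.make_class("P", ["x", "y"])
    assert P(1, 2) == P(1, 2)
    F = attr.make_class("F", {"x": attr.ib(default=0)}, frozen=True)
    assert F().x == 0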
# These are required within this module, so we define them here and merely
# import them into .validators.
@attrs(slots=True, hash=True)
class _AndValidator(object):
"""
Compose many validators into a single one.
"""
_validators = attrib()
def __call__(self, inst, attr, value):
for v in self._validators:
v(inst, attr, value)
def and_(*validators):
"""
A validator that composes multiple validators into one.
When called on a value, it runs all wrapped validators.
:param validators: Arbitrary number of validators.
:type validators: callables
.. versionadded:: 17.1.0
"""
vals = []
for validator in validators:
vals.extend(
validator._validators if isinstance(validator, _AndValidator)
else [validator]
)
return _AndValidator(tuple(vals))
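# --- Editor's illustrative sketch, not part of the upstream attrs source. ---
# ``and_`` is re-exported as ``attr.validators.and_``; passing a list to
# ``attr.ib(validator=[...])`` composes validators the same way.
def _example_and_validator():  # pragma: no cover - sketch only
    import attr
    from attr.validators import and_, instance_of
    def positive(inst, attribute, value):
        if value <= 0:
            raise ValueError("must be positive")
    @attr.s
    class C(object):
        x = attr.ib(validator=and_(instance_of(int), positive))
    assert C(3).x == 3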
| {
"repo_name": "nparley/mylatitude",
"path": "lib/attr/_make.py",
"copies": "1",
"size": "49291",
"license": "mit",
"hash": 7896157672638334000,
"line_mean": 31.6214427531,
"line_max": 79,
"alpha_frac": 0.5585603863,
"autogenerated": false,
"ratio": 4.09495721525297,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.515351760155297,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import hashlib
import linecache
import sys
from operator import itemgetter
from . import _config
from ._compat import PY2, isclass, iteritems, metadata_proxy, set_closure_cell
from .exceptions import (
DefaultAlreadySetError, FrozenInstanceError, NotAnAttrsClassError,
UnannotatedAttributeError
)
# This is used at least twice, so cache it here.
_obj_setattr = object.__setattr__
_init_convert_pat = "__attr_convert_{}"
_init_factory_pat = "__attr_factory_{}"
_tuple_property_pat = " {attr_name} = property(itemgetter({index}))"
_empty_metadata_singleton = metadata_proxy({})
class _Nothing(object):
"""
Sentinel class to indicate the lack of a value when ``None`` is ambiguous.
All instances of `_Nothing` are equal.
"""
def __copy__(self):
return self
def __deepcopy__(self, _):
return self
def __eq__(self, other):
return other.__class__ == _Nothing
def __ne__(self, other):
return not self == other
def __repr__(self):
return "NOTHING"
def __hash__(self):
return 0xdeadbeef
NOTHING = _Nothing()
"""
Sentinel to indicate the lack of a value when ``None`` is ambiguous.
"""
def attrib(default=NOTHING, validator=None,
repr=True, cmp=True, hash=None, init=True,
convert=None, metadata={}, type=None):
"""
Create a new attribute on a class.
.. warning::
Does *not* do anything unless the class is also decorated with
:func:`attr.s`!
:param default: A value that is used if an ``attrs``-generated ``__init__``
is used and no value is passed while instantiating or the attribute is
excluded using ``init=False``.
If the value is an instance of :class:`Factory`, its callable will be
used to construct a new value (useful for mutable data types like lists
or dicts).
If a default is not set (or set manually to ``attr.NOTHING``), a value
*must* be supplied when instantiating; otherwise a :exc:`TypeError`
will be raised.
The default can also be set using decorator notation as shown below.
:type default: Any value.
:param validator: :func:`callable` that is called by ``attrs``-generated
``__init__`` methods after the instance has been initialized. They
receive the initialized instance, the :class:`Attribute`, and the
passed value.
The return value is *not* inspected so the validator has to throw an
exception itself.
If a ``list`` is passed, its items are treated as validators and must
all pass.
Validators can be globally disabled and re-enabled using
:func:`get_run_validators`.
The validator can also be set using decorator notation as shown below.
:type validator: ``callable`` or a ``list`` of ``callable``\ s.
:param bool repr: Include this attribute in the generated ``__repr__``
method.
:param bool cmp: Include this attribute in the generated comparison methods
(``__eq__`` et al).
:param hash: Include this attribute in the generated ``__hash__``
method. If ``None`` (default), mirror *cmp*'s value. This is the
correct behavior according to the Python spec. Setting this value to
anything other than ``None`` is *discouraged*.
:type hash: ``bool`` or ``None``
:param bool init: Include this attribute in the generated ``__init__``
method. It is possible to set this to ``False`` and set a default
value. In that case this attribute is unconditionally initialized
with the specified default value or factory.
:param callable convert: :func:`callable` that is called by
``attrs``-generated ``__init__`` methods to convert the attribute's value
to the desired format. It is given the passed-in value, and the
returned value will be used as the new value of the attribute. The
value is converted before being passed to the validator, if any.
:param metadata: An arbitrary mapping, to be used by third-party
components. See :ref:`extending_metadata`.
:param type: The type of the attribute. In Python 3.6 or greater, the
preferred method to specify the type is using a variable annotation
(see `PEP 526 <https://www.python.org/dev/peps/pep-0526/>`_).
This argument is provided for backward compatibility.
Regardless of the approach used, the type will be stored on
``Attribute.type``.
.. versionchanged:: 17.1.0 *validator* can be a ``list`` now.
.. versionchanged:: 17.1.0
*hash* is ``None`` and therefore mirrors *cmp* by default.
.. versionadded:: 17.3.0 *type*
"""
if hash is not None and hash is not True and hash is not False:
raise TypeError(
"Invalid value for hash. Must be True, False, or None."
)
return _CountingAttr(
default=default,
validator=validator,
repr=repr,
cmp=cmp,
hash=hash,
init=init,
convert=convert,
metadata=metadata,
type=type,
)
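# --- Editor's illustrative sketch, not part of the upstream attrs source. ---
# Typical ``attrib`` usage through the public ``attr.ib`` name.  (In this
# vintage the conversion keyword is ``convert``; later releases rename it to
# ``converter``, so it is omitted here.)
def _example_attrib():  # pragma: no cover - sketch only
    import attr
    @attr.s
    class User(object):
        name = attr.ib()
        age = attr.ib(default=0, validator=attr.validators.instance_of(int))
        tags = attr.ib(default=attr.Factory(list), metadata={"doc": "labels"})
    u = User("ada")
    assert u.age == 0 and u.tags == []
    assert attr.fields(User).tags.metadata["doc"] == "labels"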
def _make_attr_tuple_class(cls_name, attr_names):
"""
Create a tuple subclass to hold `Attribute`s for an `attrs` class.
The subclass is a bare tuple with properties for names.
class MyClassAttributes(tuple):
__slots__ = ()
x = property(itemgetter(0))
"""
attr_class_name = "{}Attributes".format(cls_name)
attr_class_template = [
"class {}(tuple):".format(attr_class_name),
" __slots__ = ()",
]
if attr_names:
for i, attr_name in enumerate(attr_names):
attr_class_template.append(_tuple_property_pat.format(
index=i,
attr_name=attr_name,
))
else:
attr_class_template.append(" pass")
globs = {"itemgetter": itemgetter}
eval(compile("\n".join(attr_class_template), "", "exec"), globs)
return globs[attr_class_name]
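# --- Editor's illustrative sketch, not part of the upstream attrs source. ---
# The generated class is equivalent to this hand-written pattern: a bare
# tuple whose named properties are itemgetter-backed aliases for indices.
def _example_tuple_properties():  # pragma: no cover - sketch only
    from operator import itemgetter
    class PairAttributes(tuple):
        __slots__ = ()
        x = property(itemgetter(0))
        y = property(itemgetter(1))
    p = PairAttributes(("a", "b"))
    assert p.x == "a" and p[1] == p.y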
# Tuple class for extracted attributes from a class definition.
# `super_attrs` is a subset of `attrs`.
_Attributes = _make_attr_tuple_class("_Attributes", [
"attrs", # all attributes to build dunder methods for
"super_attrs", # attributes that have been inherited from super classes
])
def _is_class_var(annot):
"""
Check whether *annot* is a typing.ClassVar.
The implementation is gross but importing `typing` is slow and there are
discussions about removing it from the stdlib altogether.
"""
return str(annot).startswith("typing.ClassVar")
def _get_annotations(cls):
"""
Get annotations for *cls*.
"""
anns = getattr(cls, "__annotations__", None)
if anns is None:
return {}
# Verify that the annotations aren't merely inherited.
for super_cls in cls.__mro__[1:]:
if anns is getattr(super_cls, "__annotations__", None):
return {}
return anns
def _transform_attrs(cls, these, auto_attribs):
"""
Transform all `_CountingAttr`s on a class into `Attribute`s.
If *these* is passed, use that and don't look for them on the class.
Return an `_Attributes`.
"""
cd = cls.__dict__
anns = _get_annotations(cls)
if these is not None:
ca_list = sorted((
(name, ca)
for name, ca
in iteritems(these)
), key=lambda e: e[1].counter)
elif auto_attribs is True:
ca_names = {
name
for name, attr
in cd.items()
if isinstance(attr, _CountingAttr)
}
ca_list = []
annot_names = set()
for attr_name, type in anns.items():
if _is_class_var(type):
continue
annot_names.add(attr_name)
a = cd.get(attr_name, NOTHING)
if not isinstance(a, _CountingAttr):
if a is NOTHING:
a = attrib()
else:
a = attrib(default=a)
ca_list.append((attr_name, a))
unannotated = ca_names - annot_names
if len(unannotated) > 0:
raise UnannotatedAttributeError(
"The following `attr.ib`s lack a type annotation: " +
", ".join(sorted(
unannotated,
key=lambda n: cd.get(n).counter
)) + "."
)
else:
ca_list = sorted((
(name, attr)
for name, attr
in cd.items()
if isinstance(attr, _CountingAttr)
), key=lambda e: e[1].counter)
non_super_attrs = [
Attribute.from_counting_attr(
name=attr_name,
ca=ca,
type=anns.get(attr_name),
)
for attr_name, ca
in ca_list
]
# Walk *down* the MRO for attributes. While doing so, we collect the names
# of attributes we've seen in `taken_attr_names` and ignore their
# redefinitions deeper in the hierarchy.
super_attrs = []
taken_attr_names = {a.name: a for a in non_super_attrs}
for super_cls in cls.__mro__[1:-1]:
sub_attrs = getattr(super_cls, "__attrs_attrs__", None)
if sub_attrs is not None:
# We iterate over sub_attrs backwards so we can reverse the whole
# list in the end and get all attributes in the order they have
# been defined.
for a in reversed(sub_attrs):
prev_a = taken_attr_names.get(a.name)
if prev_a is None:
super_attrs.append(a)
taken_attr_names[a.name] = a
elif prev_a == a:
# This happens through multiple inheritance. We don't want
# to favor attributes that are further down in the tree
# so we move them to the back.
super_attrs.remove(a)
super_attrs.append(a)
# Now reverse the list, such that the attributes are sorted by *descending*
# age. IOW: the oldest attribute definition is at the head of the list.
super_attrs.reverse()
attr_names = [a.name for a in super_attrs + non_super_attrs]
AttrsClass = _make_attr_tuple_class(cls.__name__, attr_names)
attrs = AttrsClass(
super_attrs + [
Attribute.from_counting_attr(
name=attr_name,
ca=ca,
type=anns.get(attr_name)
)
for attr_name, ca
in ca_list
]
)
had_default = False
for a in attrs:
if had_default is True and a.default is NOTHING and a.init is True:
raise ValueError(
"No mandatory attributes allowed after an attribute with a "
"default value or factory. Attribute in question: {a!r}"
.format(a=a)
)
elif had_default is False and \
a.default is not NOTHING and \
a.init is not False:
had_default = True
return _Attributes((attrs, super_attrs))
def _frozen_setattrs(self, name, value):
"""
Attached to frozen classes as __setattr__.
"""
raise FrozenInstanceError()
def _frozen_delattrs(self, name):
"""
Attached to frozen classes as __delattr__.
"""
raise FrozenInstanceError()
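# --- Editor's illustrative sketch, not part of the upstream attrs source. ---
# Frozen classes reject attribute assignment and deletion; the documented
# escape hatch is ``object.__setattr__`` (e.g. inside ``__attrs_post_init__``).
def _example_frozen():  # pragma: no cover - sketch only
    import attr
    from attr.exceptions import FrozenInstanceError
    @attr.s(frozen=True)
    class Point(object):
        x = attr.ib()
    p = Point(1)
    try:
        p.x = 2
    except FrozenInstanceError:
        pass
    object.__setattr__(p, "x", 2)    # bypasses the frozen __setattr__
    assert p.x == 2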
class _ClassBuilder(object):
"""
Iteratively build *one* class.
"""
__slots__ = (
"_cls", "_cls_dict", "_attrs", "_super_names", "_attr_names", "_slots",
"_frozen", "_has_post_init",
)
def __init__(self, cls, these, slots, frozen, auto_attribs):
attrs, super_attrs = _transform_attrs(cls, these, auto_attribs)
self._cls = cls
self._cls_dict = dict(cls.__dict__) if slots else {}
self._attrs = attrs
self._super_names = set(a.name for a in super_attrs)
self._attr_names = tuple(a.name for a in attrs)
self._slots = slots
self._frozen = frozen or _has_frozen_superclass(cls)
self._has_post_init = bool(getattr(cls, "__attrs_post_init__", False))
self._cls_dict["__attrs_attrs__"] = self._attrs
if frozen:
self._cls_dict["__setattr__"] = _frozen_setattrs
self._cls_dict["__delattr__"] = _frozen_delattrs
def __repr__(self):
return "<_ClassBuilder(cls={cls})>".format(cls=self._cls.__name__)
def build_class(self):
"""
Finalize class based on the accumulated configuration.
Builder cannot be used anymore after calling this method.
"""
if self._slots is True:
return self._create_slots_class()
else:
return self._patch_original_class()
def _patch_original_class(self):
"""
Apply accumulated methods and return the class.
"""
cls = self._cls
super_names = self._super_names
# Clean class of attribute definitions (`attr.ib()`s).
for name in self._attr_names:
if name not in super_names and \
getattr(cls, name, None) is not None:
delattr(cls, name)
# Attach our dunder methods.
for name, value in self._cls_dict.items():
setattr(cls, name, value)
return cls
def _create_slots_class(self):
"""
Build and return a new class with a `__slots__` attribute.
"""
super_names = self._super_names
cd = {
k: v
for k, v in iteritems(self._cls_dict)
if k not in tuple(self._attr_names) + ("__dict__",)
}
# We only add the names of attributes that aren't inherited.
# Setting __slots__ to inherited attributes wastes memory.
cd["__slots__"] = tuple(
name
for name in self._attr_names
if name not in super_names
)
qualname = getattr(self._cls, "__qualname__", None)
if qualname is not None:
cd["__qualname__"] = qualname
attr_names = tuple(self._attr_names)
def slots_getstate(self):
"""
Automatically created by attrs.
"""
return tuple(getattr(self, name) for name in attr_names)
def slots_setstate(self, state):
"""
Automatically created by attrs.
"""
__bound_setattr = _obj_setattr.__get__(self, Attribute)
for name, value in zip(attr_names, state):
__bound_setattr(name, value)
# slots and frozen require __getstate__/__setstate__ to work
cd["__getstate__"] = slots_getstate
cd["__setstate__"] = slots_setstate
# Create new class based on old class and our methods.
cls = type(self._cls)(
self._cls.__name__,
self._cls.__bases__,
cd,
)
# The following is a fix for
# https://github.com/python-attrs/attrs/issues/102. On Python 3,
# if a method mentions `__class__` or uses the no-arg super(), the
# compiler will bake a reference to the class in the method itself
# as `method.__closure__`. Since we replace the class with a
# clone, we rewrite these references so it keeps working.
for item in cls.__dict__.values():
if isinstance(item, (classmethod, staticmethod)):
# Class- and staticmethods hide their functions inside.
# These might need to be rewritten as well.
closure_cells = getattr(item.__func__, "__closure__", None)
else:
closure_cells = getattr(item, "__closure__", None)
if not closure_cells: # Catch None or the empty list.
continue
for cell in closure_cells:
if cell.cell_contents is self._cls:
set_closure_cell(cell, cls)
return cls
def add_repr(self, ns):
self._cls_dict["__repr__"] = _make_repr(self._attrs, ns=ns)
return self
def add_str(self):
repr_ = self._cls_dict.get("__repr__")
if repr_ is None:
raise ValueError(
"__str__ can only be generated if a __repr__ exists."
)
self._cls_dict["__str__"] = repr_
return self
def make_unhashable(self):
self._cls_dict["__hash__"] = None
return self
def add_hash(self):
self._cls_dict["__hash__"] = _make_hash(self._attrs)
return self
def add_init(self):
self._cls_dict["__init__"] = _make_init(
self._attrs,
self._has_post_init,
self._frozen,
)
return self
def add_cmp(self):
cd = self._cls_dict
cd["__eq__"], cd["__ne__"], cd["__lt__"], cd["__le__"], cd["__gt__"], \
cd["__ge__"] = _make_cmp(self._attrs)
return self
def attrs(maybe_cls=None, these=None, repr_ns=None,
repr=True, cmp=True, hash=None, init=True,
slots=False, frozen=False, str=False, auto_attribs=False):
r"""
A class decorator that adds `dunder
<https://wiki.python.org/moin/DunderAlias>`_\ -methods according to the
specified attributes using :func:`attr.ib` or the *these* argument.
:param these: A dictionary of name to :func:`attr.ib` mappings. This is
useful to avoid the definition of your attributes within the class body
because you can't (e.g. if you want to add ``__repr__`` methods to
Django models) or don't want to.
If *these* is not ``None``, ``attrs`` will *not* search the class body
for attributes.
:type these: :class:`dict` of :class:`str` to :func:`attr.ib`
:param str repr_ns: When using nested classes, there's no way in Python 2
to automatically detect that. Therefore it's possible to set the
namespace explicitly for a more meaningful ``repr`` output.
:param bool repr: Create a ``__repr__`` method with a human-readable
representation of the ``attrs`` attributes.
:param bool str: Create a ``__str__`` method that is identical to
``__repr__``. This is usually not necessary except for
:class:`Exception`\ s.
:param bool cmp: Create ``__eq__``, ``__ne__``, ``__lt__``, ``__le__``,
``__gt__``, and ``__ge__`` methods that compare the class as if it were
a tuple of its ``attrs`` attributes. But the attributes are *only*
compared if the type of both classes is *identical*!
:param hash: If ``None`` (default), the ``__hash__`` method is generated
according to how *cmp* and *frozen* are set.
1. If *both* are True, ``attrs`` will generate a ``__hash__`` for you.
2. If *cmp* is True and *frozen* is False, ``__hash__`` will be set to
None, marking it unhashable (which it is).
3. If *cmp* is False, ``__hash__`` will be left untouched meaning the
``__hash__`` method of the superclass will be used (if superclass is
``object``, this means it will fall back to id-based hashing).
Although not recommended, you can decide for yourself and force
``attrs`` to create one (e.g. if the class is immutable even though you
didn't freeze it programmatically) by passing ``True`` or not. Both of
these cases are rather special and should be used carefully.
See the `Python documentation \
<https://docs.python.org/3/reference/datamodel.html#object.__hash__>`_
and the `GitHub issue that led to the default behavior \
<https://github.com/python-attrs/attrs/issues/136>`_ for more details.
:type hash: ``bool`` or ``None``
:param bool init: Create an ``__init__`` method that initializes the
``attrs`` attributes. Leading underscores are stripped for the
argument name. If a ``__attrs_post_init__`` method exists on the
class, it will be called after the class is fully initialized.
:param bool slots: Create a slots_-style class that's more
memory-efficient. See :ref:`slots` for further ramifications.
:param bool frozen: Make instances immutable after initialization. If
someone attempts to modify a frozen instance,
:exc:`attr.exceptions.FrozenInstanceError` is raised.
Please note:
1. This is achieved by installing a custom ``__setattr__`` method
on your class, so you can't implement your own.
2. True immutability is impossible in Python.
3. This *does* have a minor runtime performance :ref:`impact
<how-frozen>` when initializing new instances. In other words:
``__init__`` is slightly slower with ``frozen=True``.
4. If a class is frozen, you cannot modify ``self`` in
``__attrs_post_init__`` or a self-written ``__init__``. You can
circumvent that limitation by using
``object.__setattr__(self, "attribute_name", value)``.
.. _slots: https://docs.python.org/3/reference/datamodel.html#slots
:param bool auto_attribs: If True, collect `PEP 526`_-annotated attributes
(Python 3.6 and later only) from the class body.
In this case, you **must** annotate every field. If ``attrs``
encounters a field that is set to an :func:`attr.ib` but lacks a type
annotation, an :exc:`attr.exceptions.UnannotatedAttributeError` is
raised. Use ``field_name: typing.Any = attr.ib(...)`` if you don't
want to set a type.
If you assign a value to those attributes (e.g. ``x: int = 42``), that
value becomes the default value, as if it were passed using
``attr.ib(default=42)``. Passing an instance of :class:`Factory` also
works as expected.
Attributes annotated as :data:`typing.ClassVar` are **ignored**.
.. _`PEP 526`: https://www.python.org/dev/peps/pep-0526/
.. versionadded:: 16.0.0 *slots*
.. versionadded:: 16.1.0 *frozen*
.. versionadded:: 16.3.0 *str*, and support for ``__attrs_post_init__``.
.. versionchanged::
17.1.0 *hash* supports ``None`` as value which is also the default
now.
.. versionadded:: 17.3.0 *auto_attribs*
"""
def wrap(cls):
if getattr(cls, "__class__", None) is None:
raise TypeError("attrs only works with new-style classes.")
builder = _ClassBuilder(cls, these, slots, frozen, auto_attribs)
if repr is True:
builder.add_repr(repr_ns)
if str is True:
builder.add_str()
if cmp is True:
builder.add_cmp()
if hash is not True and hash is not False and hash is not None:
# Can't use a simple `in` check here because 1 == True, for example.
raise TypeError(
"Invalid value for hash. Must be True, False, or None."
)
elif hash is False or (hash is None and cmp is False):
pass
elif hash is True or (hash is None and cmp is True and frozen is True):
builder.add_hash()
else:
builder.make_unhashable()
if init is True:
builder.add_init()
return builder.build_class()
# maybe_cls's type depends on the usage of the decorator. It's a class
# if it's used as `@attrs` but ``None`` if used as `@attrs()`.
if maybe_cls is None:
return wrap
else:
return wrap(maybe_cls)
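# --- Editor's illustrative sketch, not part of the upstream attrs source. ---
# The hash rules documented above, demonstrated with the ``cmp`` keyword of
# this vintage.  (An ``auto_attribs`` example is omitted because it needs
# Python 3.6 annotation syntax, which would not parse on Python 2.)
def _example_attrs_hash_rules():  # pragma: no cover - sketch only
    import attr
    @attr.s(frozen=True)
    class Frozen(object):
        x = attr.ib()
    @attr.s
    class Mutable(object):
        x = attr.ib()
    @attr.s(cmp=False)
    class NoCmp(object):
        x = attr.ib()
    assert hash(Frozen(1)) == hash(Frozen(1))   # cmp + frozen: value hash
    assert Mutable.__hash__ is None             # cmp only: unhashable
    assert NoCmp(1) != NoCmp(1)                 # cmp=False: id-based semantics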
_attrs = attrs
"""
Internal alias so we can use it in functions that take an argument called
*attrs*.
"""
if PY2:
def _has_frozen_superclass(cls):
"""
Check whether *cls* has a frozen ancestor by looking at its
__setattr__.
"""
return (
getattr(
cls.__setattr__, "__module__", None
) == _frozen_setattrs.__module__ and
cls.__setattr__.__name__ == _frozen_setattrs.__name__
)
else:
def _has_frozen_superclass(cls):
"""
Check whether *cls* has a frozen ancestor by looking at its
__setattr__.
"""
return cls.__setattr__ == _frozen_setattrs
def _attrs_to_tuple(obj, attrs):
"""
Create a tuple of all values of *obj*'s *attrs*.
"""
return tuple(getattr(obj, a.name) for a in attrs)
def _make_hash(attrs):
attrs = tuple(
a
for a in attrs
if a.hash is True or (a.hash is None and a.cmp is True)
)
# We cache the generated hash methods for the same kinds of attributes.
sha1 = hashlib.sha1()
sha1.update(repr(attrs).encode("utf-8"))
unique_filename = "<attrs generated hash %s>" % (sha1.hexdigest(),)
type_hash = hash(unique_filename)
lines = [
"def __hash__(self):",
" return hash((",
" %d," % (type_hash,),
]
for a in attrs:
lines.append(" self.%s," % (a.name))
lines.append(" ))")
script = "\n".join(lines)
globs = {}
locs = {}
bytecode = compile(script, unique_filename, "exec")
eval(bytecode, globs, locs)
# In order for debuggers like PDB to be able to step through the code,
# we add a fake linecache entry.
linecache.cache[unique_filename] = (
len(script),
None,
script.splitlines(True),
unique_filename,
)
return locs["__hash__"]
def _add_hash(cls, attrs):
"""
Add a hash method to *cls*.
"""
cls.__hash__ = _make_hash(attrs)
return cls
def _make_cmp(attrs):
attrs = [a for a in attrs if a.cmp]
def attrs_to_tuple(obj):
"""
Save us some typing.
"""
return _attrs_to_tuple(obj, attrs)
def eq(self, other):
"""
Automatically created by attrs.
"""
if other.__class__ is self.__class__:
return attrs_to_tuple(self) == attrs_to_tuple(other)
else:
return NotImplemented
def ne(self, other):
"""
Automatically created by attrs.
"""
result = eq(self, other)
if result is NotImplemented:
return NotImplemented
else:
return not result
def lt(self, other):
"""
Automatically created by attrs.
"""
if isinstance(other, self.__class__):
return attrs_to_tuple(self) < attrs_to_tuple(other)
else:
return NotImplemented
def le(self, other):
"""
Automatically created by attrs.
"""
if isinstance(other, self.__class__):
return attrs_to_tuple(self) <= attrs_to_tuple(other)
else:
return NotImplemented
def gt(self, other):
"""
Automatically created by attrs.
"""
if isinstance(other, self.__class__):
return attrs_to_tuple(self) > attrs_to_tuple(other)
else:
return NotImplemented
def ge(self, other):
"""
Automatically created by attrs.
"""
if isinstance(other, self.__class__):
return attrs_to_tuple(self) >= attrs_to_tuple(other)
else:
return NotImplemented
return eq, ne, lt, le, gt, ge
def _add_cmp(cls, attrs=None):
"""
Add comparison methods to *cls*.
"""
if attrs is None:
attrs = cls.__attrs_attrs__
cls.__eq__, cls.__ne__, cls.__lt__, cls.__le__, cls.__gt__, cls.__ge__ = \
_make_cmp(attrs)
return cls
def _make_repr(attrs, ns):
"""
Make a repr method for *attrs*, adding *ns* to the full name.
"""
attr_names = tuple(
a.name
for a in attrs
if a.repr
)
def repr_(self):
"""
Automatically created by attrs.
"""
real_cls = self.__class__
if ns is None:
qualname = getattr(real_cls, "__qualname__", None)
if qualname is not None:
class_name = qualname.rsplit(">.", 1)[-1]
else:
class_name = real_cls.__name__
else:
class_name = ns + "." + real_cls.__name__
return "{0}({1})".format(
class_name,
", ".join(
name + "=" + repr(getattr(self, name))
for name in attr_names
)
)
return repr_
def _add_repr(cls, ns=None, attrs=None):
"""
Add a repr method to *cls*.
"""
if attrs is None:
attrs = cls.__attrs_attrs__
repr_ = _make_repr(attrs, ns)
cls.__repr__ = repr_
return cls
def _make_init(attrs, post_init, frozen):
attrs = [
a
for a in attrs
if a.init or a.default is not NOTHING
]
# We cache the generated init methods for the same kinds of attributes.
sha1 = hashlib.sha1()
sha1.update(repr(attrs).encode("utf-8"))
unique_filename = "<attrs generated init {0}>".format(
sha1.hexdigest()
)
script, globs = _attrs_to_init_script(
attrs,
frozen,
post_init,
)
locs = {}
bytecode = compile(script, unique_filename, "exec")
attr_dict = dict((a.name, a) for a in attrs)
globs.update({
"NOTHING": NOTHING,
"attr_dict": attr_dict,
})
if frozen is True:
# Save the lookup overhead in __init__ if we need to circumvent
# immutability.
globs["_cached_setattr"] = _obj_setattr
eval(bytecode, globs, locs)
# In order for debuggers like PDB to be able to step through the code,
# we add a fake linecache entry.
linecache.cache[unique_filename] = (
len(script),
None,
script.splitlines(True),
unique_filename,
)
return locs["__init__"]
def _add_init(cls, frozen):
"""
Add an __init__ method to *cls*. If *frozen* is True, make it immutable.
"""
cls.__init__ = _make_init(
cls.__attrs_attrs__,
getattr(cls, "__attrs_post_init__", False),
frozen,
)
return cls
def fields(cls):
"""
Returns the tuple of ``attrs`` attributes for a class.
The tuple also allows accessing the fields by their names (see below for
examples).
:param type cls: Class to introspect.
:raise TypeError: If *cls* is not a class.
:raise attr.exceptions.NotAnAttrsClassError: If *cls* is not an ``attrs``
class.
:rtype: tuple (with name accessors) of :class:`attr.Attribute`
.. versionchanged:: 16.2.0 Returned tuple allows accessing the fields
by name.
"""
if not isclass(cls):
raise TypeError("Passed object must be a class.")
attrs = getattr(cls, "__attrs_attrs__", None)
if attrs is None:
raise NotAnAttrsClassError(
"{cls!r} is not an attrs-decorated class.".format(cls=cls)
)
return attrs
def validate(inst):
"""
Validate all attributes on *inst* that have a validator.
Lets all exceptions propagate.
:param inst: Instance of a class with ``attrs`` attributes.
"""
if _config._run_validators is False:
return
for a in fields(inst.__class__):
v = a.validator
if v is not None:
v(inst, a, getattr(inst, a.name))
def _attrs_to_init_script(attrs, frozen, post_init):
"""
Return a script of an initializer for *attrs* and a dict of globals.
The globals are expected by the generated script.
If *frozen* is True, we cannot set the attributes directly so we use
a cached ``object.__setattr__``.
"""
lines = []
if frozen is True:
lines.append(
# Circumvent the __setattr__ descriptor to save one lookup per
# assignment.
"_setattr = _cached_setattr.__get__(self, self.__class__)"
)
def fmt_setter(attr_name, value_var):
return "_setattr('%(attr_name)s', %(value_var)s)" % {
"attr_name": attr_name,
"value_var": value_var,
}
def fmt_setter_with_converter(attr_name, value_var):
conv_name = _init_convert_pat.format(attr_name)
return "_setattr('%(attr_name)s', %(conv)s(%(value_var)s))" % {
"attr_name": attr_name,
"value_var": value_var,
"conv": conv_name,
}
else:
def fmt_setter(attr_name, value):
return "self.%(attr_name)s = %(value)s" % {
"attr_name": attr_name,
"value": value,
}
def fmt_setter_with_converter(attr_name, value_var):
conv_name = _init_convert_pat.format(attr_name)
return "self.%(attr_name)s = %(conv)s(%(value_var)s)" % {
"attr_name": attr_name,
"value_var": value_var,
"conv": conv_name,
}
args = []
attrs_to_validate = []
# This is a dictionary of names to validator and converter callables.
# Injecting this into __init__ globals lets us avoid lookups.
names_for_globals = {}
for a in attrs:
if a.validator:
attrs_to_validate.append(a)
attr_name = a.name
arg_name = a.name.lstrip("_")
has_factory = isinstance(a.default, Factory)
if has_factory and a.default.takes_self:
maybe_self = "self"
else:
maybe_self = ""
if a.init is False:
if has_factory:
init_factory_name = _init_factory_pat.format(a.name)
if a.convert is not None:
lines.append(fmt_setter_with_converter(
attr_name,
init_factory_name + "({0})".format(maybe_self)))
conv_name = _init_convert_pat.format(a.name)
names_for_globals[conv_name] = a.convert
else:
lines.append(fmt_setter(
attr_name,
init_factory_name + "({0})".format(maybe_self)
))
names_for_globals[init_factory_name] = a.default.factory
else:
if a.convert is not None:
lines.append(fmt_setter_with_converter(
attr_name,
"attr_dict['{attr_name}'].default"
.format(attr_name=attr_name)
))
conv_name = _init_convert_pat.format(a.name)
names_for_globals[conv_name] = a.convert
else:
lines.append(fmt_setter(
attr_name,
"attr_dict['{attr_name}'].default"
.format(attr_name=attr_name)
))
elif a.default is not NOTHING and not has_factory:
args.append(
"{arg_name}=attr_dict['{attr_name}'].default".format(
arg_name=arg_name,
attr_name=attr_name,
)
)
if a.convert is not None:
lines.append(fmt_setter_with_converter(attr_name, arg_name))
names_for_globals[_init_convert_pat.format(a.name)] = a.convert
else:
lines.append(fmt_setter(attr_name, arg_name))
elif has_factory:
args.append("{arg_name}=NOTHING".format(arg_name=arg_name))
lines.append("if {arg_name} is not NOTHING:"
.format(arg_name=arg_name))
init_factory_name = _init_factory_pat.format(a.name)
if a.convert is not None:
lines.append(" " + fmt_setter_with_converter(attr_name,
arg_name))
lines.append("else:")
lines.append(" " + fmt_setter_with_converter(
attr_name,
init_factory_name + "({0})".format(maybe_self)
))
names_for_globals[_init_convert_pat.format(a.name)] = a.convert
else:
lines.append(" " + fmt_setter(attr_name, arg_name))
lines.append("else:")
lines.append(" " + fmt_setter(
attr_name,
init_factory_name + "({0})".format(maybe_self)
))
names_for_globals[init_factory_name] = a.default.factory
else:
args.append(arg_name)
if a.convert is not None:
lines.append(fmt_setter_with_converter(attr_name, arg_name))
names_for_globals[_init_convert_pat.format(a.name)] = a.convert
else:
lines.append(fmt_setter(attr_name, arg_name))
if attrs_to_validate: # we can skip this if there are no validators.
names_for_globals["_config"] = _config
lines.append("if _config._run_validators is True:")
for a in attrs_to_validate:
val_name = "__attr_validator_{}".format(a.name)
attr_name = "__attr_{}".format(a.name)
lines.append(" {}(self, {}, self.{})".format(
val_name, attr_name, a.name))
names_for_globals[val_name] = a.validator
names_for_globals[attr_name] = a
if post_init:
lines.append("self.__attrs_post_init__()")
return """\
def __init__(self, {args}):
{lines}
""".format(
args=", ".join(args),
lines="\n ".join(lines) if lines else "pass",
), names_for_globals
class Attribute(object):
"""
*Read-only* representation of an attribute.
:attribute name: The name of the attribute.
Plus *all* arguments of :func:`attr.ib`.
"""
__slots__ = (
"name", "default", "validator", "repr", "cmp", "hash", "init",
"convert", "metadata", "type"
)
def __init__(self, name, default, validator, repr, cmp, hash, init,
convert=None, metadata=None, type=None):
# Cache this descriptor here to speed things up later.
bound_setattr = _obj_setattr.__get__(self, Attribute)
bound_setattr("name", name)
bound_setattr("default", default)
bound_setattr("validator", validator)
bound_setattr("repr", repr)
bound_setattr("cmp", cmp)
bound_setattr("hash", hash)
bound_setattr("init", init)
bound_setattr("convert", convert)
bound_setattr("metadata", (metadata_proxy(metadata) if metadata
else _empty_metadata_singleton))
bound_setattr("type", type)
def __setattr__(self, name, value):
raise FrozenInstanceError()
@classmethod
def from_counting_attr(cls, name, ca, type=None):
# type holds the annotated value. deal with conflicts:
if type is None:
type = ca.type
elif ca.type is not None:
raise ValueError(
"Type annotation and type argument cannot both be present"
)
inst_dict = {
k: getattr(ca, k)
for k
in Attribute.__slots__
if k not in (
"name", "validator", "default", "type"
) # exclude methods
}
return cls(name=name, validator=ca._validator, default=ca._default,
type=type, **inst_dict)
# Don't use _add_pickle since fields(Attribute) doesn't work
def __getstate__(self):
"""
Play nice with pickle.
"""
return tuple(getattr(self, name) if name != "metadata"
else dict(self.metadata)
for name in self.__slots__)
def __setstate__(self, state):
"""
Play nice with pickle.
"""
bound_setattr = _obj_setattr.__get__(self, Attribute)
for name, value in zip(self.__slots__, state):
if name != "metadata":
bound_setattr(name, value)
else:
bound_setattr(name, metadata_proxy(value) if value else
_empty_metadata_singleton)
_a = [Attribute(name=name, default=NOTHING, validator=None,
repr=True, cmp=True, hash=(name != "metadata"), init=True)
for name in Attribute.__slots__]
Attribute = _add_hash(
_add_cmp(_add_repr(Attribute, attrs=_a), attrs=_a),
attrs=[a for a in _a if a.hash]
)
class _CountingAttr(object):
"""
Intermediate representation of attributes that uses a counter to preserve
the order in which the attributes have been defined.
*Internal* data structure of the attrs library. Running into it is most
likely the result of a bug, such as a forgotten `@attr.s` decorator.
"""
__slots__ = ("counter", "_default", "repr", "cmp", "hash", "init",
"metadata", "_validator", "convert", "type")
__attrs_attrs__ = tuple(
Attribute(name=name, default=NOTHING, validator=None,
repr=True, cmp=True, hash=True, init=True)
for name
in ("counter", "_default", "repr", "cmp", "hash", "init",)
) + (
Attribute(name="metadata", default=None, validator=None,
repr=True, cmp=True, hash=False, init=True),
)
cls_counter = 0
def __init__(self, default, validator, repr, cmp, hash, init, convert,
metadata, type):
_CountingAttr.cls_counter += 1
self.counter = _CountingAttr.cls_counter
self._default = default
# If validator is a list/tuple, wrap it using helper validator.
if validator and isinstance(validator, (list, tuple)):
self._validator = and_(*validator)
else:
self._validator = validator
self.repr = repr
self.cmp = cmp
self.hash = hash
self.init = init
self.convert = convert
self.metadata = metadata
self.type = type
def validator(self, meth):
"""
Decorator that adds *meth* to the list of validators.
Returns *meth* unchanged.
.. versionadded:: 17.1.0
"""
if self._validator is None:
self._validator = meth
else:
self._validator = and_(self._validator, meth)
return meth
def default(self, meth):
"""
Decorator that allows setting the default for an attribute.
Returns *meth* unchanged.
:raises DefaultAlreadySetError: If default has been set before.
.. versionadded:: 17.1.0
"""
if self._default is not NOTHING:
raise DefaultAlreadySetError()
self._default = Factory(meth, takes_self=True)
return meth
_CountingAttr = _add_cmp(_add_repr(_CountingAttr))
@attrs(slots=True, init=False, hash=True)
class Factory(object):
"""
Stores a factory callable.
If passed as the default value to :func:`attr.ib`, the factory is used to
generate a new value.
:param callable factory: A callable that takes either no arguments or
exactly one mandatory positional argument, depending on *takes_self*.
:param bool takes_self: Pass the partially initialized instance that is
being initialized as a positional argument.
.. versionadded:: 17.1.0 *takes_self*
"""
factory = attrib()
takes_self = attrib()
def __init__(self, factory, takes_self=False):
"""
`Factory` is part of the default machinery so if we want a default
value here, we have to implement it ourselves.
"""
self.factory = factory
self.takes_self = takes_self
def make_class(name, attrs, bases=(object,), **attributes_arguments):
"""
A quick way to create a new class called *name* with *attrs*.
:param name: The name for the new class.
:type name: str
:param attrs: A list of names or a dictionary mapping names to
attributes.
:type attrs: :class:`list` or :class:`dict`
:param tuple bases: Classes that the new class will subclass.
:param attributes_arguments: Passed unmodified to :func:`attr.s`.
:return: A new class with *attrs*.
:rtype: type
.. versionadded:: 17.1.0 *bases*
"""
if isinstance(attrs, dict):
cls_dict = attrs
elif isinstance(attrs, (list, tuple)):
cls_dict = dict((a, attrib()) for a in attrs)
else:
raise TypeError("attrs argument must be a dict or a list.")
post_init = cls_dict.pop("__attrs_post_init__", None)
type_ = type(
name,
bases,
{} if post_init is None else {"__attrs_post_init__": post_init}
)
# For pickling to work, the __module__ variable needs to be set to the
# frame where the class is created. Bypass this step in environments where
# sys._getframe is not defined (Jython for example) or sys._getframe is not
# defined for arguments greater than 0 (IronPython).
try:
type_.__module__ = sys._getframe(1).f_globals.get(
"__name__", "__main__",
)
except (AttributeError, ValueError):
pass
return _attrs(these=cls_dict, **attributes_arguments)(type_)
# These are required within this module, so we define them here and merely
# import them into .validators.
@attrs(slots=True, hash=True)
class _AndValidator(object):
"""
Compose many validators into a single one.
"""
_validators = attrib()
def __call__(self, inst, attr, value):
for v in self._validators:
v(inst, attr, value)
def and_(*validators):
"""
A validator that composes multiple validators into one.
When called on a value, it runs all wrapped validators.
:param validators: Arbitrary number of validators.
:type validators: callables
.. versionadded:: 17.1.0
"""
vals = []
for validator in validators:
vals.extend(
validator._validators if isinstance(validator, _AndValidator)
else [validator]
)
return _AndValidator(tuple(vals))
| {
"repo_name": "anthgur/servo",
"path": "tests/wpt/web-platform-tests/tools/third_party/attrs/src/attr/_make.py",
"copies": "12",
"size": "46098",
"license": "mpl-2.0",
"hash": -2407760612302266400,
"line_mean": 32.0451612903,
"line_max": 79,
"alpha_frac": 0.5625189813,
"autogenerated": false,
"ratio": 4.07622247767265,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.000010541851149061775,
"num_lines": 1395
} |
from __future__ import absolute_import, division, print_function
import hashlib
import linecache
from operator import itemgetter
from . import _config
from ._compat import PY2, iteritems, isclass, iterkeys, metadata_proxy
from .exceptions import (
DefaultAlreadySetError,
FrozenInstanceError,
NotAnAttrsClassError,
)
# This is used at least twice, so cache it here.
_obj_setattr = object.__setattr__
_init_convert_pat = "__attr_convert_{}"
_init_factory_pat = "__attr_factory_{}"
_tuple_property_pat = " {attr_name} = property(itemgetter({index}))"
_empty_metadata_singleton = metadata_proxy({})
class _Nothing(object):
"""
Sentinel class to indicate the lack of a value when ``None`` is ambiguous.
All instances of `_Nothing` are equal.
"""
def __copy__(self):
return self
def __deepcopy__(self, _):
return self
def __eq__(self, other):
return other.__class__ == _Nothing
def __ne__(self, other):
return not self == other
def __repr__(self):
return "NOTHING"
def __hash__(self):
return 0xdeadbeef
NOTHING = _Nothing()
"""
Sentinel to indicate the lack of a value when ``None`` is ambiguous.
"""
def attr(default=NOTHING, validator=None,
repr=True, cmp=True, hash=None, init=True,
convert=None, metadata={}):
"""
Create a new attribute on a class.
.. warning::
Does *not* do anything unless the class is also decorated with
:func:`attr.s`!
:param default: A value that is used if an ``attrs``-generated ``__init__``
is used and no value is passed while instantiating or the attribute is
excluded using ``init=False``.
If the value is an instance of :class:`Factory`, its callable will be
used to construct a new value (useful for mutable datatypes like lists
or dicts).
If a default is not set (or set manually to ``attr.NOTHING``), a value
*must* be supplied when instantiating; otherwise a :exc:`TypeError`
will be raised.
The default can also be set using decorator notation as shown below.
:type default: Any value.
:param validator: :func:`callable` that is called by ``attrs``-generated
``__init__`` methods after the instance has been initialized. They
receive the initialized instance, the :class:`Attribute`, and the
passed value.
The return value is *not* inspected so the validator has to throw an
exception itself.
If a ``list`` is passed, its items are treated as validators and must
all pass.
Validators can be globally disabled and re-enabled using
:func:`get_run_validators`.
The validator can also be set using decorator notation as shown below.
:type validator: ``callable`` or a ``list`` of ``callable``\ s.
:param bool repr: Include this attribute in the generated ``__repr__``
method.
:param bool cmp: Include this attribute in the generated comparison methods
(``__eq__`` et al).
:param hash: Include this attribute in the generated ``__hash__``
method. If ``None`` (default), mirror *cmp*'s value. This is the
correct behavior according to the Python spec. Setting this value to
anything other than ``None`` is *discouraged*.
:type hash: ``bool`` or ``None``
:param bool init: Include this attribute in the generated ``__init__``
method. It is possible to set this to ``False`` and set a default
value. In that case this attribute is unconditionally initialized
with the specified default value or factory.
:param callable convert: :func:`callable` that is called by
``attrs``-generated ``__init__`` methods to convert the attribute's value
to the desired format. It is given the passed-in value, and the
returned value will be used as the new value of the attribute. The
value is converted before being passed to the validator, if any.
:param metadata: An arbitrary mapping, to be used by third-party
components. See :ref:`extending_metadata`.
.. versionchanged:: 17.1.0 *validator* can be a ``list`` now.
.. versionchanged:: 17.1.0
*hash* is ``None`` and therefore mirrors *cmp* by default.
"""
if hash is not None and hash is not True and hash is not False:
raise TypeError(
"Invalid value for hash. Must be True, False, or None."
)
return _CountingAttr(
default=default,
validator=validator,
repr=repr,
cmp=cmp,
hash=hash,
init=init,
convert=convert,
metadata=metadata,
)
def _make_attr_tuple_class(cls_name, attr_names):
"""
Create a tuple subclass to hold `Attribute`s for an `attrs` class.
The subclass is a bare tuple with properties for names.
class MyClassAttributes(tuple):
__slots__ = ()
x = property(itemgetter(0))
"""
attr_class_name = "{}Attributes".format(cls_name)
attr_class_template = [
"class {}(tuple):".format(attr_class_name),
" __slots__ = ()",
]
if attr_names:
for i, attr_name in enumerate(attr_names):
attr_class_template.append(_tuple_property_pat.format(
index=i,
attr_name=attr_name,
))
else:
attr_class_template.append(" pass")
globs = {"itemgetter": itemgetter}
eval(compile("\n".join(attr_class_template), "", "exec"), globs)
return globs[attr_class_name]
def _transform_attrs(cls, these):
"""
Transforms all `_CountingAttr`s on a class into `Attribute`s and saves the
list in `__attrs_attrs__`.
If *these* is passed, use that and don't look for them on the class.
"""
super_cls = []
for c in reversed(cls.__mro__[1:-1]):
sub_attrs = getattr(c, "__attrs_attrs__", None)
if sub_attrs is not None:
super_cls.extend(a for a in sub_attrs if a not in super_cls)
if these is None:
ca_list = [(name, attr)
for name, attr
in cls.__dict__.items()
if isinstance(attr, _CountingAttr)]
else:
ca_list = [(name, ca)
for name, ca
in iteritems(these)]
non_super_attrs = [
Attribute.from_counting_attr(name=attr_name, ca=ca)
for attr_name, ca
in sorted(ca_list, key=lambda e: e[1].counter)
]
attr_names = [a.name for a in super_cls + non_super_attrs]
AttrsClass = _make_attr_tuple_class(cls.__name__, attr_names)
cls.__attrs_attrs__ = AttrsClass(super_cls + [
Attribute.from_counting_attr(name=attr_name, ca=ca)
for attr_name, ca
in sorted(ca_list, key=lambda e: e[1].counter)
])
had_default = False
for a in cls.__attrs_attrs__:
if these is None and a not in super_cls:
setattr(cls, a.name, a)
if had_default is True and a.default is NOTHING and a.init is True:
raise ValueError(
"No mandatory attributes allowed after an attribute with a "
"default value or factory. Attribute in question: {a!r}"
.format(a=a)
)
elif had_default is False and \
a.default is not NOTHING and \
a.init is not False:
had_default = True
def _frozen_setattrs(self, name, value):
"""
Attached to frozen classes as __setattr__.
"""
raise FrozenInstanceError()
def _frozen_delattrs(self, name):
"""
Attached to frozen classes as __delattr__.
"""
raise FrozenInstanceError()
def attributes(maybe_cls=None, these=None, repr_ns=None,
repr=True, cmp=True, hash=None, init=True,
slots=False, frozen=False, str=False):
r"""
A class decorator that adds `dunder
<https://wiki.python.org/moin/DunderAlias>`_\ -methods according to the
specified attributes using :func:`attr.ib` or the *these* argument.
:param these: A dictionary of name to :func:`attr.ib` mappings. This is
useful to avoid the definition of your attributes within the class body
because you can't (e.g. if you want to add ``__repr__`` methods to
Django models) or don't want to.
If *these* is not ``None``, ``attrs`` will *not* search the class body
for attributes.
:type these: :class:`dict` of :class:`str` to :func:`attr.ib`
:param str repr_ns: When using nested classes, there's no way in Python 2
to automatically detect that. Therefore it's possible to set the
namespace explicitly for a more meaningful ``repr`` output.
:param bool repr: Create a ``__repr__`` method with a human-readable
representation of the ``attrs`` attributes.
:param bool str: Create a ``__str__`` method that is identical to
``__repr__``. This is usually not necessary except for
:class:`Exception`\ s.
:param bool cmp: Create ``__eq__``, ``__ne__``, ``__lt__``, ``__le__``,
``__gt__``, and ``__ge__`` methods that compare the class as if it were
a tuple of its ``attrs`` attributes. But the attributes are *only*
compared if the type of both classes is *identical*!
:param hash: If ``None`` (default), the ``__hash__`` method is generated
according to how *cmp* and *frozen* are set.
1. If *both* are True, ``attrs`` will generate a ``__hash__`` for you.
2. If *cmp* is True and *frozen* is False, ``__hash__`` will be set to
None, marking it unhashable (which it is).
3. If *cmp* is False, ``__hash__`` will be left untouched meaning the
``__hash__`` method of the superclass will be used (if superclass is
``object``, this means it will fall back to id-based hashing).
Although not recommended, you can decide for yourself and force
``attrs`` to create one (e.g. if the class is immutable even though you
didn't freeze it programmatically) by passing ``True`` or not. Both of
these cases are rather special and should be used carefully.
See the `Python documentation \
<https://docs.python.org/3/reference/datamodel.html#object.__hash__>`_
and the `GitHub issue that led to the default behavior \
<https://github.com/python-attrs/attrs/issues/136>`_ for more details.
:type hash: ``bool`` or ``None``
:param bool init: Create an ``__init__`` method that initializes the
``attrs`` attributes. Leading underscores are stripped for the
argument name. If a ``__attrs_post_init__`` method exists on the
class, it will be called after the class is fully initialized.
:param bool slots: Create a slots_-style class that's more
memory-efficient. See :ref:`slots` for further ramifications.
:param bool frozen: Make instances immutable after initialization. If
someone attempts to modify a frozen instance,
:exc:`attr.exceptions.FrozenInstanceError` is raised.
Please note:
1. This is achieved by installing a custom ``__setattr__`` method
on your class, so you can't implement your own.
2. True immutability is impossible in Python.
3. This *does* have a minor runtime performance :ref:`impact
<how-frozen>` when initializing new instances. In other words:
``__init__`` is slightly slower with ``frozen=True``.
4. If a class is frozen, you cannot modify ``self`` in
``__attrs_post_init__`` or a self-written ``__init__``. You can
circumvent that limitation by using
``object.__setattr__(self, "attribute_name", value)``.
.. _slots: https://docs.python.org/3.5/reference/datamodel.html#slots
.. versionadded:: 16.0.0 *slots*
.. versionadded:: 16.1.0 *frozen*
.. versionadded:: 16.3.0 *str*, and support for ``__attrs_post_init__``.
.. versionchanged::
17.1.0 *hash* supports ``None`` as value which is also the default
now.
"""
def wrap(cls):
if getattr(cls, "__class__", None) is None:
raise TypeError("attrs only works with new-style classes.")
if repr is False and str is True:
raise ValueError(
"__str__ can only be generated if a __repr__ exists."
)
if slots:
# Only need this later if we're using slots.
if these is None:
ca_list = [name
for name, attr
in cls.__dict__.items()
if isinstance(attr, _CountingAttr)]
else:
ca_list = list(iterkeys(these))
_transform_attrs(cls, these)
# Can't just re-use frozen name because Python's scoping. :(
# Can't compare function objects because Python 2 is terrible. :(
effectively_frozen = _has_frozen_superclass(cls) or frozen
if repr is True:
cls = _add_repr(cls, ns=repr_ns)
if str is True:
cls.__str__ = cls.__repr__
if cmp is True:
cls = _add_cmp(cls)
if hash is not True and hash is not False and hash is not None:
raise TypeError(
"Invalid value for hash. Must be True, False, or None."
)
elif hash is False or (hash is None and cmp is False):
pass
elif hash is True or (hash is None and cmp is True and frozen is True):
cls = _add_hash(cls)
else:
cls.__hash__ = None
if init is True:
cls = _add_init(cls, effectively_frozen)
if effectively_frozen is True:
cls.__setattr__ = _frozen_setattrs
cls.__delattr__ = _frozen_delattrs
if slots is True:
# slots and frozen require __getstate__/__setstate__ to work
cls = _add_pickle(cls)
if slots is True:
cls_dict = dict(cls.__dict__)
cls_dict["__slots__"] = tuple(ca_list)
for ca_name in ca_list:
# It might not actually be in there, e.g. if using 'these'.
cls_dict.pop(ca_name, None)
cls_dict.pop("__dict__", None)
qualname = getattr(cls, "__qualname__", None)
cls = type(cls)(cls.__name__, cls.__bases__, cls_dict)
if qualname is not None:
cls.__qualname__ = qualname
return cls
    # maybe_cls's type depends on the usage of the decorator. It's a class
    # if it's used as `@attributes` but ``None`` if used as `@attributes()`.
if maybe_cls is None:
return wrap
else:
return wrap(maybe_cls)
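# A minimal usage sketch of the decorator defined above. The class ``Point``
# and the helper name are illustrative only; it assumes ``attr`` (the
# attribute factory, a.k.a. ``attr.ib``) is defined earlier in this module,
# as it is in the attrs package.
def _example_attributes_usage():
    @attributes(frozen=True)
    class Point(object):
        x = attr()
        y = attr()
    p = Point(x=1, y=2)
    assert p == Point(1, 2)                  # generated __eq__
    assert repr(p) == "Point(x=1, y=2)"      # generated __repr__
    assert hash(p) == hash(Point(1, 2))      # cmp=True + frozen=True -> __hash__
    return p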
if PY2:
def _has_frozen_superclass(cls):
"""
Check whether *cls* has a frozen ancestor by looking at its
__setattr__.
"""
return (
getattr(
cls.__setattr__, "__module__", None
) == _frozen_setattrs.__module__ and
cls.__setattr__.__name__ == _frozen_setattrs.__name__
)
else:
def _has_frozen_superclass(cls):
"""
Check whether *cls* has a frozen ancestor by looking at its
__setattr__.
"""
return cls.__setattr__ == _frozen_setattrs
def _attrs_to_tuple(obj, attrs):
"""
Create a tuple of all values of *obj*'s *attrs*.
"""
return tuple(getattr(obj, a.name) for a in attrs)
def _add_hash(cls, attrs=None):
"""
Add a hash method to *cls*.
"""
if attrs is None:
attrs = [a
for a in cls.__attrs_attrs__
if a.hash is True or (a.hash is None and a.cmp is True)]
def hash_(self):
"""
Automatically created by attrs.
"""
return hash(_attrs_to_tuple(self, attrs))
cls.__hash__ = hash_
return cls
def _add_cmp(cls, attrs=None):
"""
Add comparison methods to *cls*.
"""
if attrs is None:
attrs = [a for a in cls.__attrs_attrs__ if a.cmp]
def attrs_to_tuple(obj):
"""
Save us some typing.
"""
return _attrs_to_tuple(obj, attrs)
def eq(self, other):
"""
Automatically created by attrs.
"""
if other.__class__ is self.__class__:
return attrs_to_tuple(self) == attrs_to_tuple(other)
else:
return NotImplemented
def ne(self, other):
"""
Automatically created by attrs.
"""
result = eq(self, other)
if result is NotImplemented:
return NotImplemented
else:
return not result
def lt(self, other):
"""
Automatically created by attrs.
"""
if isinstance(other, self.__class__):
return attrs_to_tuple(self) < attrs_to_tuple(other)
else:
return NotImplemented
def le(self, other):
"""
Automatically created by attrs.
"""
if isinstance(other, self.__class__):
return attrs_to_tuple(self) <= attrs_to_tuple(other)
else:
return NotImplemented
def gt(self, other):
"""
Automatically created by attrs.
"""
if isinstance(other, self.__class__):
return attrs_to_tuple(self) > attrs_to_tuple(other)
else:
return NotImplemented
def ge(self, other):
"""
Automatically created by attrs.
"""
if isinstance(other, self.__class__):
return attrs_to_tuple(self) >= attrs_to_tuple(other)
else:
return NotImplemented
cls.__eq__ = eq
cls.__ne__ = ne
cls.__lt__ = lt
cls.__le__ = le
cls.__gt__ = gt
cls.__ge__ = ge
return cls
def _add_repr(cls, ns=None, attrs=None):
"""
Add a repr method to *cls*.
"""
if attrs is None:
attrs = [a for a in cls.__attrs_attrs__ if a.repr]
def repr_(self):
"""
Automatically created by attrs.
"""
real_cls = self.__class__
if ns is None:
qualname = getattr(real_cls, "__qualname__", None)
if qualname is not None:
class_name = qualname.rsplit(">.", 1)[-1]
else:
class_name = real_cls.__name__
else:
class_name = ns + "." + real_cls.__name__
return "{0}({1})".format(
class_name,
", ".join(a.name + "=" + repr(getattr(self, a.name))
for a in attrs)
)
cls.__repr__ = repr_
return cls
def _add_init(cls, frozen):
"""
Add a __init__ method to *cls*. If *frozen* is True, make it immutable.
"""
attrs = [a for a in cls.__attrs_attrs__
if a.init or a.default is not NOTHING]
# We cache the generated init methods for the same kinds of attributes.
sha1 = hashlib.sha1()
sha1.update(repr(attrs).encode("utf-8"))
unique_filename = "<attrs generated init {0}>".format(
sha1.hexdigest()
)
script, globs = _attrs_to_script(
attrs,
frozen,
getattr(cls, "__attrs_post_init__", False),
)
locs = {}
bytecode = compile(script, unique_filename, "exec")
attr_dict = dict((a.name, a) for a in attrs)
globs.update({
"NOTHING": NOTHING,
"attr_dict": attr_dict,
})
if frozen is True:
# Save the lookup overhead in __init__ if we need to circumvent
# immutability.
globs["_cached_setattr"] = _obj_setattr
eval(bytecode, globs, locs)
init = locs["__init__"]
    # In order for debuggers like PDB to be able to step through the code,
    # we add a fake linecache entry.
linecache.cache[unique_filename] = (
len(script),
None,
script.splitlines(True),
unique_filename
)
cls.__init__ = init
return cls
def _add_pickle(cls):
"""
Add pickle helpers, needed for frozen and slotted classes
"""
def _slots_getstate__(obj):
"""
Play nice with pickle.
"""
return tuple(getattr(obj, a.name) for a in fields(obj.__class__))
def _slots_setstate__(obj, state):
"""
Play nice with pickle.
"""
__bound_setattr = _obj_setattr.__get__(obj, Attribute)
for a, value in zip(fields(obj.__class__), state):
__bound_setattr(a.name, value)
cls.__getstate__ = _slots_getstate__
cls.__setstate__ = _slots_setstate__
return cls
def fields(cls):
"""
Returns the tuple of ``attrs`` attributes for a class.
The tuple also allows accessing the fields by their names (see below for
examples).
:param type cls: Class to introspect.
:raise TypeError: If *cls* is not a class.
:raise attr.exceptions.NotAnAttrsClassError: If *cls* is not an ``attrs``
class.
    :rtype: tuple (with name accessors) of :class:`attr.Attribute`
.. versionchanged:: 16.2.0 Returned tuple allows accessing the fields
by name.
"""
if not isclass(cls):
raise TypeError("Passed object must be a class.")
attrs = getattr(cls, "__attrs_attrs__", None)
if attrs is None:
raise NotAnAttrsClassError(
"{cls!r} is not an attrs-decorated class.".format(cls=cls)
)
return attrs
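# A short illustrative sketch of ``fields``; the class ``C`` is hypothetical.
def _example_fields_usage():
    @attributes
    class C(object):
        x = attr()
        y = attr()
    attrs_of_c = fields(C)
    assert attrs_of_c[0].name == "x"         # definition order is preserved
    assert attrs_of_c[1].default is NOTHING  # no default was given
    # the returned tuple also allows access by name, e.g. fields(C).x
    return attrs_of_c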
def validate(inst):
"""
Validate all attributes on *inst* that have a validator.
    Lets all exceptions propagate.
:param inst: Instance of a class with ``attrs`` attributes.
"""
if _config._run_validators is False:
return
for a in fields(inst.__class__):
v = a.validator
if v is not None:
v(inst, a, getattr(inst, a.name))
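# A hedged sketch showing how ``validate`` re-runs validators outside of
# ``__init__``. ``Account`` and ``_positive`` are illustrative names.
def _example_validate_usage():
    def _positive(inst, attribute, value):
        if value <= 0:
            raise ValueError("%s must be positive" % attribute.name)
    @attributes
    class Account(object):
        balance = attr(validator=_positive)
    acc = Account(balance=10)    # validators run automatically in __init__
    acc.balance = -5             # plain assignment is *not* validated
    try:
        validate(acc)            # explicit re-validation raises here
    except ValueError:
        pass
    return acc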
def _attrs_to_script(attrs, frozen, post_init):
"""
Return a script of an initializer for *attrs* and a dict of globals.
The globals are expected by the generated script.
If *frozen* is True, we cannot set the attributes directly so we use
a cached ``object.__setattr__``.
"""
lines = []
if frozen is True:
lines.append(
# Circumvent the __setattr__ descriptor to save one lookup per
# assignment.
"_setattr = _cached_setattr.__get__(self, self.__class__)"
)
def fmt_setter(attr_name, value_var):
return "_setattr('%(attr_name)s', %(value_var)s)" % {
"attr_name": attr_name,
"value_var": value_var,
}
def fmt_setter_with_converter(attr_name, value_var):
conv_name = _init_convert_pat.format(attr_name)
return "_setattr('%(attr_name)s', %(conv)s(%(value_var)s))" % {
"attr_name": attr_name,
"value_var": value_var,
"conv": conv_name,
}
else:
def fmt_setter(attr_name, value):
return "self.%(attr_name)s = %(value)s" % {
"attr_name": attr_name,
"value": value,
}
def fmt_setter_with_converter(attr_name, value_var):
conv_name = _init_convert_pat.format(attr_name)
return "self.%(attr_name)s = %(conv)s(%(value_var)s)" % {
"attr_name": attr_name,
"value_var": value_var,
"conv": conv_name,
}
args = []
attrs_to_validate = []
# This is a dictionary of names to validator and converter callables.
# Injecting this into __init__ globals lets us avoid lookups.
names_for_globals = {}
for a in attrs:
if a.validator:
attrs_to_validate.append(a)
attr_name = a.name
arg_name = a.name.lstrip("_")
has_factory = isinstance(a.default, Factory)
if has_factory and a.default.takes_self:
maybe_self = "self"
else:
maybe_self = ""
if a.init is False:
if has_factory:
init_factory_name = _init_factory_pat.format(a.name)
if a.convert is not None:
lines.append(fmt_setter_with_converter(
attr_name,
init_factory_name + "({0})".format(maybe_self)))
conv_name = _init_convert_pat.format(a.name)
names_for_globals[conv_name] = a.convert
else:
lines.append(fmt_setter(
attr_name,
init_factory_name + "({0})".format(maybe_self)
))
names_for_globals[init_factory_name] = a.default.factory
else:
if a.convert is not None:
lines.append(fmt_setter_with_converter(
attr_name,
"attr_dict['{attr_name}'].default"
.format(attr_name=attr_name)
))
conv_name = _init_convert_pat.format(a.name)
names_for_globals[conv_name] = a.convert
else:
lines.append(fmt_setter(
attr_name,
"attr_dict['{attr_name}'].default"
.format(attr_name=attr_name)
))
elif a.default is not NOTHING and not has_factory:
args.append(
"{arg_name}=attr_dict['{attr_name}'].default".format(
arg_name=arg_name,
attr_name=attr_name,
)
)
if a.convert is not None:
lines.append(fmt_setter_with_converter(attr_name, arg_name))
names_for_globals[_init_convert_pat.format(a.name)] = a.convert
else:
lines.append(fmt_setter(attr_name, arg_name))
elif has_factory:
args.append("{arg_name}=NOTHING".format(arg_name=arg_name))
lines.append("if {arg_name} is not NOTHING:"
.format(arg_name=arg_name))
init_factory_name = _init_factory_pat.format(a.name)
if a.convert is not None:
lines.append(" " + fmt_setter_with_converter(attr_name,
arg_name))
lines.append("else:")
lines.append(" " + fmt_setter_with_converter(
attr_name,
init_factory_name + "({0})".format(maybe_self)
))
names_for_globals[_init_convert_pat.format(a.name)] = a.convert
else:
lines.append(" " + fmt_setter(attr_name, arg_name))
lines.append("else:")
lines.append(" " + fmt_setter(
attr_name,
init_factory_name + "({0})".format(maybe_self)
))
names_for_globals[init_factory_name] = a.default.factory
else:
args.append(arg_name)
if a.convert is not None:
lines.append(fmt_setter_with_converter(attr_name, arg_name))
names_for_globals[_init_convert_pat.format(a.name)] = a.convert
else:
lines.append(fmt_setter(attr_name, arg_name))
if attrs_to_validate: # we can skip this if there are no validators.
names_for_globals["_config"] = _config
lines.append("if _config._run_validators is True:")
for a in attrs_to_validate:
val_name = "__attr_validator_{}".format(a.name)
attr_name = "__attr_{}".format(a.name)
lines.append(" {}(self, {}, self.{})".format(
val_name, attr_name, a.name))
names_for_globals[val_name] = a.validator
names_for_globals[attr_name] = a
if post_init:
lines.append("self.__attrs_post_init__()")
return """\
def __init__(self, {args}):
{lines}
""".format(
args=", ".join(args),
lines="\n ".join(lines) if lines else "pass",
), names_for_globals
class Attribute(object):
"""
*Read-only* representation of an attribute.
:attribute name: The name of the attribute.
Plus *all* arguments of :func:`attr.ib`.
"""
__slots__ = (
"name", "default", "validator", "repr", "cmp", "hash", "init",
"convert", "metadata",
)
def __init__(self, name, default, validator, repr, cmp, hash, init,
convert=None, metadata=None):
# Cache this descriptor here to speed things up later.
bound_setattr = _obj_setattr.__get__(self, Attribute)
bound_setattr("name", name)
bound_setattr("default", default)
bound_setattr("validator", validator)
bound_setattr("repr", repr)
bound_setattr("cmp", cmp)
bound_setattr("hash", hash)
bound_setattr("init", init)
bound_setattr("convert", convert)
bound_setattr("metadata", (metadata_proxy(metadata) if metadata
else _empty_metadata_singleton))
def __setattr__(self, name, value):
raise FrozenInstanceError()
@classmethod
def from_counting_attr(cls, name, ca):
inst_dict = {
k: getattr(ca, k)
for k
in Attribute.__slots__
if k not in (
"name", "validator", "default",
) # exclude methods
}
return cls(name=name, validator=ca._validator, default=ca._default,
**inst_dict)
# Don't use _add_pickle since fields(Attribute) doesn't work
def __getstate__(self):
"""
Play nice with pickle.
"""
return tuple(getattr(self, name) if name != "metadata"
else dict(self.metadata)
for name in self.__slots__)
def __setstate__(self, state):
"""
Play nice with pickle.
"""
bound_setattr = _obj_setattr.__get__(self, Attribute)
for name, value in zip(self.__slots__, state):
if name != "metadata":
bound_setattr(name, value)
else:
bound_setattr(name, metadata_proxy(value) if value else
_empty_metadata_singleton)
_a = [Attribute(name=name, default=NOTHING, validator=None,
repr=True, cmp=True, hash=(name != "metadata"), init=True)
for name in Attribute.__slots__]
Attribute = _add_hash(
_add_cmp(_add_repr(Attribute, attrs=_a), attrs=_a),
attrs=[a for a in _a if a.hash]
)
class _CountingAttr(object):
"""
Intermediate representation of attributes that uses a counter to preserve
the order in which the attributes have been defined.
    *Internal* data structure of the attrs library.  Running into an instance
    of this class is most likely the result of a bug like a forgotten
    `@attr.s` decorator.
"""
__slots__ = ("counter", "_default", "repr", "cmp", "hash", "init",
"metadata", "_validator", "convert")
__attrs_attrs__ = tuple(
Attribute(name=name, default=NOTHING, validator=None,
repr=True, cmp=True, hash=True, init=True)
for name
in ("counter", "_default", "repr", "cmp", "hash", "init",)
) + (
Attribute(name="metadata", default=None, validator=None,
repr=True, cmp=True, hash=False, init=True),
)
cls_counter = 0
def __init__(self, default, validator, repr, cmp, hash, init, convert,
metadata):
_CountingAttr.cls_counter += 1
self.counter = _CountingAttr.cls_counter
self._default = default
# If validator is a list/tuple, wrap it using helper validator.
if validator and isinstance(validator, (list, tuple)):
self._validator = and_(*validator)
else:
self._validator = validator
self.repr = repr
self.cmp = cmp
self.hash = hash
self.init = init
self.convert = convert
self.metadata = metadata
def validator(self, meth):
"""
Decorator that adds *meth* to the list of validators.
Returns *meth* unchanged.
.. versionadded:: 17.1.0
"""
if self._validator is None:
self._validator = meth
else:
self._validator = and_(self._validator, meth)
return meth
def default(self, meth):
"""
        Decorator that allows setting the default for an attribute.
Returns *meth* unchanged.
:raises DefaultAlreadySetError: If default has been set before.
.. versionadded:: 17.1.0
"""
if self._default is not NOTHING:
raise DefaultAlreadySetError()
self._default = Factory(meth, takes_self=True)
return meth
_CountingAttr = _add_cmp(_add_repr(_CountingAttr))
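# A short sketch of the ``default``/``validator`` decorators defined on
# ``_CountingAttr`` above; the class ``C`` and the method names are
# illustrative only.
def _example_counting_attr_decorators():
    @attributes
    class C(object):
        x = attr()
        @x.default
        def _x_default(self):
            return 42
        @x.validator
        def _x_must_be_non_negative(self, attribute, value):
            if value < 0:
                raise ValueError("x must be >= 0")
    assert C().x == 42
    return C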
@attributes(slots=True, init=False)
class Factory(object):
"""
Stores a factory callable.
If passed as the default value to :func:`attr.ib`, the factory is used to
generate a new value.
:param callable factory: A callable that takes either none or exactly one
mandatory positional argument depending on *takes_self*.
:param bool takes_self: Pass the partially initialized instance that is
being initialized as a positional argument.
.. versionadded:: 17.1.0 *takes_self*
"""
factory = attr()
takes_self = attr()
def __init__(self, factory, takes_self=False):
"""
`Factory` is part of the default machinery so if we want a default
value here, we have to implement it ourselves.
"""
self.factory = factory
self.takes_self = takes_self
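# An illustrative sketch of ``Factory(takes_self=True)``: the factory receives
# the partially initialized instance, so one default can be derived from an
# earlier attribute. ``Request``/``host``/``url`` are hypothetical names.
def _example_factory_usage():
    @attributes
    class Request(object):
        host = attr(default="localhost")
        url = attr(default=Factory(lambda self: "http://" + self.host,
                                   takes_self=True))
    assert Request().url == "http://localhost"
    return Request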
def make_class(name, attrs, bases=(object,), **attributes_arguments):
"""
A quick way to create a new class called *name* with *attrs*.
:param name: The name for the new class.
:type name: str
:param attrs: A list of names or a dictionary of mappings of names to
attributes.
:type attrs: :class:`list` or :class:`dict`
:param tuple bases: Classes that the new class will subclass.
:param attributes_arguments: Passed unmodified to :func:`attr.s`.
:return: A new class with *attrs*.
:rtype: type
.. versionadded:: 17.1.0 *bases*
"""
if isinstance(attrs, dict):
cls_dict = attrs
elif isinstance(attrs, (list, tuple)):
cls_dict = dict((a, attr()) for a in attrs)
else:
raise TypeError("attrs argument must be a dict or a list.")
return attributes(**attributes_arguments)(type(name, bases, cls_dict))
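# A minimal sketch of ``make_class``; ``Point`` is an illustrative name.
def _example_make_class_usage():
    P = make_class("Point", ["x", "y"], frozen=True)
    p = P(x=1, y=2)
    assert (p.x, p.y) == (1, 2)
    try:
        p.x = 3                      # frozen instances reject mutation
    except FrozenInstanceError:
        pass
    return P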
# These are required within this module, so we define them here and merely
# import them into .validators.
@attributes(slots=True, hash=True)
class _AndValidator(object):
"""
Compose many validators to a single one.
"""
_validators = attr()
def __call__(self, inst, attr, value):
for v in self._validators:
v(inst, attr, value)
def and_(*validators):
"""
A validator that composes multiple validators into one.
When called on a value, it runs all wrapped validators.
:param validators: Arbitrary number of validators.
:type validators: callables
.. versionadded:: 17.1.0
"""
vals = []
for validator in validators:
vals.extend(
validator._validators if isinstance(validator, _AndValidator)
else [validator]
)
return _AndValidator(tuple(vals))
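# A hedged sketch of composing validators with ``and_``; both validator
# functions below are illustrative.
def _example_and_usage():
    def _is_int(inst, attribute, value):
        if not isinstance(value, int):
            raise TypeError("%s must be an int" % attribute.name)
    def _is_positive(inst, attribute, value):
        if value <= 0:
            raise ValueError("%s must be positive" % attribute.name)
    @attributes
    class C(object):
        x = attr(validator=and_(_is_int, _is_positive))
    return C(x=3)                    # both wrapped validators run in order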
| {
"repo_name": "whitehorse-io/encarnia",
"path": "pyenv/lib/python2.7/site-packages/attr/_make.py",
"copies": "2",
"size": "35624",
"license": "mit",
"hash": -3406400153928851000,
"line_mean": 32.6392823418,
"line_max": 79,
"alpha_frac": 0.5631596676,
"autogenerated": false,
"ratio": 4.084852654512098,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5648012322112097,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import hashlib
import os.path
import pytest
from appr import SYSTEM
from appr.pack import unpack_kub, ignore
TAR_MD5SUM = {
'darwin': "8ccd8af6ef21af7309839f1c521b6354",
'linux': "8ccd8af6ef21af7309839f1c521b6354",
'windows': '20bf57e5d5dec33e51bf1f04bfde4367'
}
KUBEUI_FILES = [
"manifest.yaml", "README.md", "templates/kube-ui-rc.yaml", "templates/kube-ui-svc.yaml"
]
def _check_app(path):
for f in KUBEUI_FILES:
assert os.path.exists(os.path.join(str(path), f))
def test_unpack_kub(pack_tar, tmpdir):
unpack_kub(pack_tar, str(tmpdir))
_check_app(str(tmpdir))
def test_extract(kubeui_package, tmpdir):
d = tmpdir.mkdir("extract")
kubeui_package.extract(str(d))
_check_app(str(d))
def test_pack(kubeui_package, tmpdir):
d = str(tmpdir.mkdir("pack")) + "/kube-ui.tar"
kubeui_package.pack(d)
assert hashlib.md5(open(d, "r").read()).hexdigest() == TAR_MD5SUM[SYSTEM]
def test_tree(kubeui_package):
files = kubeui_package.tree()
assert sorted(files) == sorted(KUBEUI_FILES)
def test_tree_filter(kubeui_package):
files = kubeui_package.tree("templates")
assert sorted(files) == sorted(["templates/kube-ui-rc.yaml", "templates/kube-ui-svc.yaml"])
def test_file(kubeui_package, data_dir):
manifest = kubeui_package.file("manifest.yaml")
assert manifest == open(data_dir + "/kube-ui/manifest.yaml", "r").read()
@pytest.mark.parametrize(
"pattern,file_path,expected",
[
('helm.txt', "helm.txt", True),
('helm.*', "helm.txt", True),
('helm.*', "rudder.txt", False),
('*.txt', "tiller.txt", True),
('*.txt', "cargo/a.txt", True),
('cargo/*.txt', "cargo/a.txt", True),
('cargo/*.*', "cargo/a.txt", True),
('cargo/*.txt', "mast/a.txt", False),
('ru[c-e]?er.txt', "rudder.txt", True),
('templates/.?*', "templates/.dotfile", True),
# Directory tests
(".git", ".git/toto", True),
(".git", ".git/toto/titi", True),
('cargo/', "cargo/", True),
('cargo/', "cargo/a.txt", True),
('cargo/', "mast/", False),
('helm.txt/', "helm.txt", False),
        # Negation tests
('!helm.txt', "helm.txt", False),
('helm.txt\n!helm.txt', "helm.txt", False),
('*\n!helm.txt', "tiller.txt", True),
('*\n!*.txt', "cargo", True),
('*\n!cargo/', "mast/a", True),
# Absolute path tests
('/a.txt', "a.txt", True),
('/a.txt', "cargo/a.txt", False),
('/cargo/a.txt', "cargo/a.txt", True),
])
def test_ignore(pattern, file_path, expected):
assert ignore(pattern, file_path) is expected
| {
"repo_name": "app-registry/appr",
"path": "tests/test_packager.py",
"copies": "2",
"size": "2742",
"license": "apache-2.0",
"hash": -4443208371163198500,
"line_mean": 28.8043478261,
"line_max": 95,
"alpha_frac": 0.5882567469,
"autogenerated": false,
"ratio": 2.8682008368200838,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.44564575837200837,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import heapq
import sys
import random
import numpy as np
from .hashfunctions import generate_hashfunctions
class CountMinSketch(object):
""" Basic Count-Min Sketch """
def __init__(self, delta, epsilon, k):
self.nbr_bits = int(np.ceil(np.exp(1) / epsilon))
self.nbr_slices = int(np.ceil(np.log(1 / delta)))
self.k = k
self.count = np.zeros((self.nbr_slices, self.nbr_bits), dtype=np.int32)
self.heap = []
self.top_k = {}
self.make_hashes = generate_hashfunctions(self.nbr_bits, self.nbr_slices)
def update(self, key, increment):
for row, column in enumerate(self.make_hashes(key)):
self.count[int(row), int(column)] += increment
return self.update_heap(key)
def update_heap(self, key):
estimate = self.get(key)
poped = key
if key in self.top_k:
old_pair = self.top_k.get(key)
old_pair[0] = estimate
heapq.heapify(self.heap)
poped = None
else:
if len(self.top_k) < self.k:
heapq.heappush(self.heap, [estimate, key])
self.top_k[key] = [estimate, key]
poped = None
else:
new_pair = [estimate, key]
old_pair = heapq.heappushpop(self.heap, new_pair)
poped = old_pair[1]
if old_pair[1] in self.top_k:
del self.top_k[old_pair[1]]
self.top_k[key] = new_pair
return poped
def get(self, key):
value = float('inf')
for row, column in enumerate(self.make_hashes(key)):
value = min(self.count[row, column], value)
return value
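# A hedged usage sketch (not part of the original module). With these
# parameters the sketch keeps ceil(e / epsilon) counters per row and
# ceil(ln(1 / delta)) rows, so any point estimate overshoots the true count
# by more than epsilon * total_updates with probability at most delta.
def _example_count_min_usage():
    cms = CountMinSketch(delta=10 ** -3, epsilon=0.01, k=5)
    for token in ["a", "a", "b", "a", "c", "b"]:
        cms.update(token, 1)
    assert cms.get("a") >= 3         # estimates never under-count
    return cms.get("a"), dict(cms.top_k)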
if __name__ == "__main__":
import random
import time
stream = []
for i in range(100):
stream = stream + [str(i)] * i
cms = CountMinSketch(10**-3, 0.01, 10)
random.shuffle(stream)
t1 = time.time()
for s in stream:
p = cms.update(s, 1)
print(time.time() - t1)
| {
"repo_name": "Parsely/probably",
"path": "probably/countmin.py",
"copies": "1",
"size": "2109",
"license": "mit",
"hash": 4714278930368098000,
"line_mean": 27.5,
"line_max": 81,
"alpha_frac": 0.5467045993,
"autogenerated": false,
"ratio": 3.457377049180328,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9433187583683824,
"avg_score": 0.014178812959300763,
"num_lines": 74
} |
from __future__ import absolute_import, division, print_function
import hmac
import json
import time
from collections import OrderedDict
from hashlib import sha256
import stripe
from stripe import error, util
class Webhook(object):
DEFAULT_TOLERANCE = 300
@staticmethod
def construct_event(
payload, sig_header, secret, tolerance=DEFAULT_TOLERANCE, api_key=None
):
if hasattr(payload, "decode"):
payload = payload.decode("utf-8")
WebhookSignature.verify_header(payload, sig_header, secret, tolerance)
data = json.loads(payload, object_pairs_hook=OrderedDict)
event = stripe.Event.construct_from(data, api_key or stripe.api_key)
return event
class WebhookSignature(object):
EXPECTED_SCHEME = "v1"
@staticmethod
def _compute_signature(payload, secret):
mac = hmac.new(
secret.encode("utf-8"),
msg=payload.encode("utf-8"),
digestmod=sha256,
)
return mac.hexdigest()
@staticmethod
def _get_timestamp_and_signatures(header, scheme):
list_items = [i.split("=", 2) for i in header.split(",")]
timestamp = int([i[1] for i in list_items if i[0] == "t"][0])
signatures = [i[1] for i in list_items if i[0] == scheme]
return timestamp, signatures
@classmethod
def verify_header(cls, payload, header, secret, tolerance=None):
try:
timestamp, signatures = cls._get_timestamp_and_signatures(
header, cls.EXPECTED_SCHEME
)
except Exception:
raise error.SignatureVerificationError(
"Unable to extract timestamp and signatures from header",
header,
payload,
)
if not signatures:
raise error.SignatureVerificationError(
"No signatures found with expected scheme "
"%s" % cls.EXPECTED_SCHEME,
header,
payload,
)
signed_payload = "%d.%s" % (timestamp, payload)
expected_sig = cls._compute_signature(signed_payload, secret)
if not any(util.secure_compare(expected_sig, s) for s in signatures):
raise error.SignatureVerificationError(
"No signatures found matching the expected signature for "
"payload",
header,
payload,
)
if tolerance and timestamp < time.time() - tolerance:
raise error.SignatureVerificationError(
"Timestamp outside the tolerance zone (%d)" % timestamp,
header,
payload,
)
return True
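# A hedged usage sketch (not part of the shipped module): how an HTTP handler
# would typically verify and parse an incoming webhook. ``request`` and
# ``endpoint_secret`` are hypothetical placeholders supplied by the caller.
def _example_construct_event(request, endpoint_secret):
    try:
        event = Webhook.construct_event(
            payload=request.body,
            sig_header=request.headers["Stripe-Signature"],
            secret=endpoint_secret,
        )
    except error.SignatureVerificationError:
        return None                  # reject: signature did not verify
    return event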
| {
"repo_name": "stripe/stripe-python",
"path": "stripe/webhook.py",
"copies": "1",
"size": "2720",
"license": "mit",
"hash": 8739211386317530000,
"line_mean": 29.9090909091,
"line_max": 78,
"alpha_frac": 0.5878676471,
"autogenerated": false,
"ratio": 4.51077943615257,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.559864708325257,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import inspect
from typing import List, Optional, Text
import numpy as np
import tensorflow as tf
from six import string_types
from tensorflow_probability.python import distributions as tfd
from tensorflow_probability.python.distributions import kullback_leibler
from tensorflow_probability.python.internal import (assert_util, prefer_static,
tensorshape_util)
from tensorflow_probability.python.layers.internal import \
distribution_tensor_coercible
from odin.bay import distributions as obd
from odin.bay.distributions.negative_binomial_disp import NegativeBinomialDisp
from odin.bay.distributions.zero_inflated import ZeroInflated
__all__ = ['concat_distribution']
# ===========================================================================
# Helpers
# ===========================================================================
# must hand define all the parameters here
# NOTE: this list is to be updated, or a smarter solution for automatically
# mining all the parameters
dist_params = {
# complex
obd.Independent: ['distribution', 'reinterpreted_batch_ndims'],
ZeroInflated: ['count_distribution', 'inflated_distribution'],
obd.MixtureSameFamily: ['mixture_distribution', 'components_distribution'],
# Exponential
obd.Gamma: ['concentration', 'rate'],
# Gaussians
obd.Normal: ['loc', 'scale'],
obd.LogNormal: ['loc', 'scale'],
obd.MultivariateNormalDiag: ['loc', 'scale'],
obd.MultivariateNormalTriL: ['loc', 'scale'],
obd.MultivariateNormalFullCovariance: ['loc', 'scale'],
# Count
NegativeBinomialDisp: ['loc', 'disp'],
obd.NegativeBinomial: ['total_count', 'logits_parameter'],
obd.Poisson: ['log_rate_parameter'],
# Binary and probability
obd.Gumbel: ['loc', 'scale'],
obd.Bernoulli: ['logits_parameter'],
obd.Dirichlet: ['concentration'],
obd.Beta: ['concentration1', 'concentration0'],
obd.OneHotCategorical: ['logits_parameter'],
obd.Categorical: ['logits_parameter'],
# others
obd.Laplace: ['loc', 'scale'],
obd.Wishart: ['df', 'scale'],
obd.Uniform: ['low', 'high'],
obd.Multinomial: ['total_count', 'logits_parameter'],
obd.Deterministic: ['loc', 'atol', 'rtol'],
obd.VectorDeterministic: ['loc', 'atol', 'rtol'],
}
for dist_type, attr_names in dist_params.items():
assert isinstance(attr_names, (tuple, list)) and all(
isinstance(name, string_types) for name in attr_names), \
"Error defining parameters of distributions"
assert isinstance(dist_type, type) and issubclass(dist_type, obd.Distribution),\
"Error defining parameters of distributions"
assert all(hasattr(dist_type, name) for name in attr_names), \
"Error defining parameters of distributions"
# ===========================================================================
# Main code
# ===========================================================================
def _find_axis_for_stack(dists, given_axis):
# check event shape is consistent
if given_axis is not None:
return int(given_axis)
event_shape = dists[0].event_shape
batch_shape = dists[0].batch_shape
assertions = []
for d in dists[1:]:
assertions.append(tf.assert_equal(event_shape, d.event_shape))
assertions.append(tf.assert_equal(batch_shape.ndims, d.batch_shape.ndims))
with tf.control_dependencies(assertions):
axis = []
for d in dists:
shape = d.batch_shape
for ax, (i, j) in enumerate(zip(batch_shape, shape)):
if i != j:
axis.append(ax)
if len(axis) == 0:
return 0
assert len(set(axis)) == 1, \
"Multiple dimensions are found to be different among the distributions, "\
"expect only 1 different dimension."
return axis[0]
def concat_distribution(dists: List[tfd.Distribution],
axis: Optional[int] = None,
validate_args: bool = False,
allow_nan_stats: bool = True,
                        name: Optional[Text] = None) -> tfd.Distribution:
  """ Create a new `Distribution` by concatenating the parameters of
  multiple distributions of the same type along the given `axis`
Note
----
If your distribution is the output from
`tensorflow_probability.DistributionLambda`, this function will remove all
  the keras tracking utilities; for a better solution check out
`odin.networks.distribution_util_layer.ConcatDistribution`
"""
if not isinstance(dists, (tuple, list)):
dists = [dists]
if len(dists) == 1:
return dists[0]
if len(dists) == 0:
raise ValueError("No distributions were given")
axis = _find_axis_for_stack(dists, given_axis=axis)
t = type(dists[0])
is_keras_output = False
# _TensorCoercible will messing up with the parameters of the
# distribution
if issubclass(t, distribution_tensor_coercible._TensorCoercible):
is_keras_output = True
t = type.mro(t)[2]
assert issubclass(t, tfd.Distribution) and not issubclass(
t, distribution_tensor_coercible._TensorCoercible)
# no more distribution, tensor of parameters is return during the
# recursive operator
if issubclass(t, tf.Tensor):
if dists[0].shape.ndims == 0:
return dists[0] # TODO: better solution here
return tf.concat(dists, axis=axis)
elif issubclass(t, obd.Distribution):
pass # continue with all distribution parameters
else:
return dists[0]
# get all params for concatenate
if t not in dist_params:
raise RuntimeError("Unknown distribution of type '%s' for concatenation" %
str(t))
params_name = dist_params[t]
# start concat the params
params = {}
for p in params_name:
attrs = [getattr(d, p) for d in dists]
is_method = False
if inspect.ismethod(attrs[0]):
attrs = [a() for a in attrs]
is_method = True
if is_method and '_parameter' == p[-10:]:
p = p[:-10]
params[p] = concat_distribution(attrs, axis=axis)
# extra arguments
if name is not None:
params['name'] = name
args = inspect.getfullargspec(t.__init__).args
if 'allow_nan_stats' in args:
params['allow_nan_stats'] = allow_nan_stats
if 'validate_args' in args:
params['validate_args'] = validate_args
dist = t(**params)
return dist
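# A hedged usage sketch (illustrative only, assuming ``obd.Normal`` is the
# re-exported tfp Normal): concatenating two batched Normals whose batch
# sizes differ along axis 0.
def _example_concat_distribution():
  d1 = obd.Normal(loc=tf.zeros((3, 2)), scale=tf.ones((3, 2)))
  d2 = obd.Normal(loc=tf.ones((5, 2)), scale=tf.ones((5, 2)))
  d = concat_distribution([d1, d2])  # the differing axis (0) is inferred
  # d.batch_shape is now (8, 2); the event shape stays scalar
  return d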
| {
"repo_name": "imito/odin",
"path": "odin/bay/distributions/utils.py",
"copies": "1",
"size": "6375",
"license": "mit",
"hash": -799079870453626600,
"line_mean": 35.4285714286,
"line_max": 82,
"alpha_frac": 0.6407843137,
"autogenerated": false,
"ratio": 3.90625,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.50470343137,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import inspect
import datetime
import tempfile
import os
import shutil
import numpy as np
from contextlib import contextmanager
from multiprocessing.pool import ThreadPool
from multipledispatch import Dispatcher
from datashape import dshape, Record
from datashape.discovery import is_zero_time
from toolz import pluck, get, curry, keyfilter
from .compatibility import unicode
sample = Dispatcher('sample')
def iter_except(func, exception, first=None):
"""Call a `func` repeatedly until `exception` is raised. Optionally call
`first` first.
Parameters
----------
func : callable
Repeatedly call this until `exception` is raised.
exception : Exception
Stop calling `func` when this is raised.
first : callable, optional, default ``None``
Call this first if it isn't ``None``.
Examples
--------
>>> x = {'a': 1, 'b': 2}
>>> def iterate():
... yield 'a'
... yield 'b'
... yield 'c'
...
>>> keys = iterate()
>>> diter = iter_except(lambda: x[next(keys)], KeyError)
>>> list(diter)
[1, 2]
Notes
-----
* Taken from https://docs.python.org/2/library/itertools.html#recipes
"""
try:
if first is not None:
yield first()
while 1: # True isn't a reserved word in Python 2.x
yield func()
except exception:
pass
def ext(filename):
_, e = os.path.splitext(filename)
return e.lstrip(os.extsep)
def raises(err, lamda):
try:
lamda()
return False
except err:
return True
def expand_tuples(L):
"""
>>> expand_tuples([1, (2, 3)])
[(1, 2), (1, 3)]
>>> expand_tuples([1, 2])
[(1, 2)]
"""
if not L:
return [()]
elif not isinstance(L[0], tuple):
rest = expand_tuples(L[1:])
return [(L[0],) + t for t in rest]
else:
rest = expand_tuples(L[1:])
return [(item,) + t for t in rest for item in L[0]]
@contextmanager
def tmpfile(extension=''):
extension = '.' + extension.lstrip('.')
handle, filename = tempfile.mkstemp(extension)
os.close(handle)
os.remove(filename)
yield filename
if os.path.exists(filename):
if os.path.isdir(filename):
shutil.rmtree(filename)
else:
try:
os.remove(filename)
except OSError: # sometimes we can't remove a generated temp file
pass
def keywords(func):
""" Get the argument names of a function
>>> def f(x, y=2):
... pass
>>> keywords(f)
['x', 'y']
"""
if isinstance(func, type):
return keywords(func.__init__)
return inspect.getargspec(func).args
def cls_name(cls):
if 'builtin' in cls.__module__:
return cls.__name__
else:
return cls.__module__.split('.')[0] + '.' + cls.__name__
@contextmanager
def filetext(text, extension='', open=open, mode='w'):
with tmpfile(extension=extension) as filename:
f = open(filename, mode=mode)
try:
f.write(text)
finally:
try:
f.close()
except AttributeError:
pass
try:
yield filename
finally:
if os.path.exists(filename):
try:
os.remove(filename)
except OSError:
pass
@contextmanager
def filetexts(d, open=open):
""" Dumps a number of textfiles to disk
d - dict
a mapping from filename to text like {'a.csv': '1,1\n2,2'}
"""
for filename, text in d.items():
f = open(filename, 'wt')
try:
f.write(text)
finally:
try:
f.close()
except AttributeError:
pass
try:
yield list(d)
finally:
for filename in d:
if os.path.exists(filename):
try:
os.remove(filename)
except OSError:
pass
def normalize_to_date(dt):
if isinstance(dt, datetime.datetime) and is_zero_time(dt.time()):
return dt.date()
else:
return dt
def assert_allclose(lhs, rhs):
for tb in map(zip, lhs, rhs):
for left, right in tb:
if isinstance(left, (np.floating, float)):
# account for nans
assert np.all(np.isclose(left, right, equal_nan=True))
continue
if isinstance(left, datetime.datetime):
left = normalize_to_date(left)
if isinstance(right, datetime.datetime):
right = normalize_to_date(right)
assert left == right
def records_to_tuples(ds, data):
""" Transform records into tuples
Examples
--------
>>> seq = [{'a': 1, 'b': 10}, {'a': 2, 'b': 20}]
>>> list(records_to_tuples('var * {a: int, b: int}', seq))
[(1, 10), (2, 20)]
>>> records_to_tuples('{a: int, b: int}', seq[0]) # single elements
(1, 10)
>>> records_to_tuples('var * int', [1, 2, 3]) # pass through on non-records
[1, 2, 3]
See Also
--------
tuples_to_records
"""
if isinstance(ds, (str, unicode)):
ds = dshape(ds)
if isinstance(ds.measure, Record) and len(ds.shape) == 1:
return pluck(ds.measure.names, data, default=None)
if isinstance(ds.measure, Record) and len(ds.shape) == 0:
return get(ds.measure.names, data)
if not isinstance(ds.measure, Record):
return data
raise NotImplementedError()
def tuples_to_records(ds, data):
""" Transform tuples into records
Examples
--------
>>> seq = [(1, 10), (2, 20)]
>>> list(tuples_to_records('var * {a: int, b: int}', seq)) # doctest: +SKIP
[{'a': 1, 'b': 10}, {'a': 2, 'b': 20}]
>>> tuples_to_records('{a: int, b: int}', seq[0]) # doctest: +SKIP
{'a': 1, 'b': 10}
>>> tuples_to_records('var * int', [1, 2, 3]) # pass through on non-records
[1, 2, 3]
See Also
--------
records_to_tuples
"""
if isinstance(ds, (str, unicode)):
ds = dshape(ds)
if isinstance(ds.measure, Record) and len(ds.shape) == 1:
names = ds.measure.names
return (dict(zip(names, tup)) for tup in data)
if isinstance(ds.measure, Record) and len(ds.shape) == 0:
names = ds.measure.names
return dict(zip(names, data))
if not isinstance(ds.measure, Record):
return data
raise NotImplementedError()
@contextmanager
def ignoring(*exceptions):
try:
yield
except exceptions:
pass
def into_path(*path):
""" Path to file in into directory
>>> into_path('backends', 'tests', 'myfile.csv') # doctest: +SKIP
'/home/user/odo/odo/backends/tests/myfile.csv'
"""
import odo
return os.path.join(os.path.dirname(odo.__file__), *path)
@curry
def pmap(f, iterable):
"""Map `f` over `iterable` in parallel using a ``ThreadPool``.
"""
p = ThreadPool()
try:
result = p.map(f, iterable)
finally:
p.terminate()
return result
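# A small illustrative sketch of ``pmap``, the curried thread-pool map.
def _example_pmap_usage():
    squares = pmap(lambda x: x * x, [1, 2, 3, 4])
    assert squares == [1, 4, 9, 16]
    return squares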
@curry
def write(triple, writer):
"""Write a file using the input from `gentemp` using `writer` and return
its index and filename.
Parameters
----------
triple : tuple of int, str, str
The first element is the index in the set of chunks of a file, the
second element is the path to write to, the third element is the data
to write.
Returns
-------
i, filename : int, str
File's index and filename. This is used to return the index and
filename after splitting files.
Notes
-----
This could be adapted to write to an already open handle, which would
    allow, e.g., multipart gzip uploads. Currently we open and write a new file
every time.
"""
i, filename, data = triple
with writer(filename, mode='wb') as f:
f.write(data)
return i, filename
def gentemp(it, suffix=None, start=0):
"""Yield an index, a temp file, and data for each element in `it`.
Parameters
----------
it : Iterable
suffix : str or ``None``, optional
Suffix to add to each temporary file's name
start : int, optional
        An integer indicating where to start the numbering of chunks in `it`.
"""
for i, data in enumerate(it, start=start): # aws needs parts to start at 1
with tmpfile('.into') as fn:
yield i, fn, data
@curry
def split(filename, nbytes, suffix=None, writer=open, start=0):
"""Split a file into chunks of size `nbytes` with each filename containing
a suffix specified by `suffix`. The file will be written with the ``write``
method of an instance of `writer`.
Parameters
----------
filename : str
The file to split
nbytes : int
Split `filename` into chunks of this size
suffix : str, optional
writer : callable, optional
Callable object to use to write the chunks of `filename`
"""
with open(filename, mode='rb') as f:
byte_chunks = iter(curry(f.read, nbytes), '')
return pmap(write(writer=writer),
gentemp(byte_chunks, suffix=suffix, start=start))
def filter_kwargs(f, kwargs):
"""Return a dict of valid kwargs for `f` from a subset of `kwargs`
Examples
--------
>>> def f(a, b=1, c=2):
... return a + b + c
...
>>> raw_kwargs = dict(a=1, b=3, d=4)
>>> f(**raw_kwargs)
Traceback (most recent call last):
...
TypeError: f() got an unexpected keyword argument 'd'
>>> kwargs = filter_kwargs(f, raw_kwargs)
>>> f(**kwargs)
6
"""
return keyfilter(keywords(f).__contains__, kwargs)
@curry
def copydoc(from_, to):
"""Copies the docstring from one function to another.
    Parameters
----------
from_ : any
The object to copy the docstring from.
to : any
The object to copy the docstring to.
Returns
-------
to : any
``to`` with the docstring from ``from_``
"""
to.__doc__ = from_.__doc__
return to
| {
"repo_name": "cpcloud/odo",
"path": "odo/utils.py",
"copies": "1",
"size": "10214",
"license": "bsd-3-clause",
"hash": -1521450950646808300,
"line_mean": 24.2197530864,
"line_max": 80,
"alpha_frac": 0.5618758567,
"autogenerated": false,
"ratio": 3.8427389014296462,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9904157510758819,
"avg_score": 0.00009144947416552355,
"num_lines": 405
} |
from __future__ import absolute_import, division, print_function
import inspect
import io
import numbers
import os
import re
import shutil
import string
import tarfile
import types
import warnings
from collections import (Iterable, Iterator, Mapping, OrderedDict, defaultdict,
deque)
from contextlib import contextmanager
from datetime import datetime
import numpy as np
from six import add_metaclass, string_types
from six.moves import cPickle
# ===========================================================================
# File type check
# ===========================================================================
GZIP_MAGIC_NUMBER = "1f8b"
def is_gzip_file(path):
""" Credit:
https://kite.com/python/examples/4945/gzip-check-if-a-file-is-gzip-compressed
"""
if isinstance(path, string_types) and os.path.isfile(path):
    with open(path, 'rb') as f:
      indicator = f.read(2)  # bytes on Python 3, str on Python 2
      return (indicator.encode("hex") if isinstance(indicator, string_types)
              else indicator.hex()) == GZIP_MAGIC_NUMBER
elif hasattr(path, 'read') and hasattr(path, 'tell'):
last_pos = path.tell()
path.seek(0)
indicator = path.read(2)
indicator = (indicator.encode("hex")
if isinstance(indicator, string_types) else indicator.hex())
path.seek(last_pos)
return indicator == GZIP_MAGIC_NUMBER
return False
def is_tar_file(path):
if not os.path.isfile(path):
return False
return tarfile.is_tarfile(path)
# ===========================================================================
# Regular expression
# ===========================================================================
RE_NUMBER = re.compile(r'^[+-]*((\d*\.\d+)|(\d+))$')
# ===========================================================================
# Data structure
# ===========================================================================
class struct(dict):
  '''Flexible object that can be assigned arbitrary attributes'''
def __init__(self, *args, **kwargs):
super(struct, self).__init__(*args, **kwargs)
# copy all dict to attr
for i, j in self.items():
if is_string(i) and not hasattr(self, i):
super(struct, self).__setattr__(i, j)
def __setattr__(self, name, val):
super(struct, self).__setattr__(name, val)
super(struct, self).__setitem__(name, val)
def __setitem__(self, x, y):
super(struct, self).__setitem__(x, y)
if is_string(x):
super(struct, self).__setattr__(x, y)
class bidict(dict):
""" Bi-directional dictionary (i.e. a <-> b)
Note
----
When you iterate over this dictionary, it will be a doubled size
dictionary
"""
def __init__(self, *args, **kwargs):
super(bidict, self).__init__(*args, **kwargs)
# this is duplication
self._inv = dict()
for i, j in self.items():
self._inv[j] = i
@property
def inv(self):
return self._inv
def __setitem__(self, key, value):
super(bidict, self).__setitem__(key, value)
self._inv[value] = key
return None
def __getitem__(self, key):
if key not in self:
return self._inv[key]
return super(bidict, self).__getitem__(key)
def update(self, *args, **kwargs):
for k, v in dict(*args, **kwargs).items():
self[k] = v
self._inv[v] = k
def __delitem__(self, key):
del self._inv[super(bidict, self).__getitem__(key)]
return dict.__delitem__(self, key)
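# A short illustrative sketch of ``bidict`` lookups (added for clarity).
def _example_bidict_usage():
  d = bidict(a=1, b=2)
  assert d['a'] == 1
  assert d[1] == 'a'        # missing keys fall back to the inverse mapping
  assert d.inv[2] == 'b'
  return d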
class defaultdictkey(defaultdict):
""" Enhanced version of `defaultdict`, instead of return a
default value, return an "improvised" default value based on
the given key.
Example
-------
>>> from odin.utils.python_utils import defaultdictkey
>>> d = defaultdictkey(lambda x: str(x))
>>> print(d['123']) # '123'
"""
def __missing__(self, key):
if self.default_factory is None:
raise KeyError(key)
else:
ret = self[key] = self.default_factory(key)
return ret
def multikeysdict(d):
assert isinstance(d, dict)
new_d = d.__class__()
for i, j in d.items():
if isinstance(i, tuple):
for k in i:
new_d[k] = j
else:
new_d[i] = j
return new_d
# ===========================================================================
# Getter
# ===========================================================================
def get_formatted_datetime(only_number=True):
if only_number:
return "{:%H%M%S%d%m%y}".format(datetime.now())
return "{:%H:%M:%S-%d%b%y}".format(datetime.now())
def get_all_properties(obj):
""" Return all attributes which are properties of given Object
"""
properties = []
clazz = obj if isinstance(obj, type) else obj.__class__
for key in dir(clazz):
if '__' in key:
continue
val = getattr(clazz, key)
if isinstance(val, property):
properties.append(key)
return properties if isinstance(obj, type) else \
{p: getattr(obj, p) for p in properties}
def get_string_placeholders(s):
assert isinstance(s, string_types)
fmt = []
  for (_, key, spec, _) in string.Formatter().parse(s):
if spec is not None:
fmt.append(key)
return tuple(fmt)
# ===========================================================================
# Data converter
# ===========================================================================
def as_tuple(x, N=None, t=None):
"""
Coerce a value to a tuple of given length (and possibly given type).
Parameters
----------
x : {value, iterable}
N : {integer}
length of the desired tuple
t : {type, call-able, optional}
required type for all elements
Returns
-------
tuple
``tuple(x)`` if `x` is iterable, ``(x,) * N`` otherwise.
Raises
------
TypeError
if `type` is given and `x` or any of its elements do not match it
ValueError
if `x` is iterable, but does not have exactly `N` elements
Note
----
  This function is adapted from Lasagne
Original work Copyright (c) 2014-2015 lasagne contributors
All rights reserved.
LICENSE: https://github.com/Lasagne/Lasagne/blob/master/LICENSE
"""
# special case numpy array
if not isinstance(x, tuple):
if isinstance(x, (types.GeneratorType, list)):
x = tuple(x)
else:
x = (x,)
# ====== check length ====== #
if is_number(N):
N = int(N)
if len(x) == 1:
x = x * N
elif len(x) != N:
raise ValueError('x has length=%d, but required length N=%d' %
(len(x), N))
# ====== check type ====== #
if t is None:
filter_func = lambda o: True
elif isinstance(t, type) or isinstance(t, (tuple, list)):
filter_func = lambda o: isinstance(o, t)
elif hasattr(t, '__call__'):
filter_func = t
else:
raise ValueError("Invalid value for `t`: %s" % str(t))
if not all(filter_func(v) for v in x):
raise TypeError("expected a single value or an iterable "
"of {0}, got {1} instead".format(t.__name__, x))
return x
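# A few illustrative calls of ``as_tuple`` (added for clarity).
def _example_as_tuple_usage():
  assert as_tuple(1, N=3) == (1, 1, 1)        # broadcast a scalar to length 3
  assert as_tuple([1, 2], t=int) == (1, 2)    # convert and type-check
  try:
    as_tuple((1, 'a'), t=int)                 # mixed types raise TypeError
  except TypeError:
    pass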
def as_list(x, N=None, t=None):
return list(as_tuple(x, N, t))
def as_bytes(x, nbytes=None, order='little'):
""" Convert some python object to bytes array, support type:
* string, unicode
* integer
* numpy.ndarray
Note
----
This method is SLOW
"""
if is_string(x):
return x.encode()
elif isinstance(x, int):
return x.to_bytes(nbytes, order, signed=False)
elif isinstance(x, np.ndarray):
return x.tobytes()
else:
raise ValueError("Not support bytes conversion for type: %s" %
type(x).__name__)
# ===========================================================================
# Types check
# ===========================================================================
def is_lambda(v):
LAMBDA = lambda: 0
return isinstance(v, type(LAMBDA)) and v.__name__ == LAMBDA.__name__
def is_pickleable(x):
try:
cPickle.dumps(x, protocol=cPickle.HIGHEST_PROTOCOL)
return True
except cPickle.PickleError:
return False
def is_fileobj(f):
  """ Check if an object `f` is an instance of a file object created
  by `open()`"""
return isinstance(f, io.TextIOBase) or \
isinstance(f, io.BufferedIOBase) or \
isinstance(f, io.RawIOBase) or \
isinstance(f, io.IOBase)
def is_callable(x):
return hasattr(x, '__call__')
def is_string(s):
return isinstance(s, string_types)
def is_path(path):
if is_string(path):
try:
os.path.exists(path)
return True
except Exception as e:
return False
return False
def is_number(i, string_number=False):
if isinstance(i, string_types) and string_number:
return RE_NUMBER.match(i) is not None
return isinstance(i, numbers.Number)
def is_bool(b):
return isinstance(b, type(True))
def is_primitives(x, inc_ndarray=True, exception_types=[]):
"""Primitive types include: number, string, boolean, None
and numpy.ndarray (optional) and numpy.generic (optional)
Parameters
----------
inc_ndarray: bool
if True, include `numpy.ndarray` and `numpy.generic` as a primitive types
"""
# complex list or Mapping
if isinstance(x, (tuple, list)):
return all(
is_primitives(
i, inc_ndarray=inc_ndarray, exception_types=exception_types)
for i in x)
elif isinstance(x, Mapping):
return all(
is_primitives(
i, inc_ndarray=inc_ndarray, exception_types=exception_types) and
is_primitives(
j, inc_ndarray=inc_ndarray, exception_types=exception_types)
for i, j in x.items())
# check for number, string, bool, and numpy array
if is_number(x) or is_string(x) or is_bool(x) or x is None or \
(any(isinstance(x, t) for t in exception_types)) or \
(inc_ndarray and isinstance(x, (np.ndarray, np.generic))):
return True
return False
# ===========================================================================
# IO utilities
# ===========================================================================
def get_all_files(path, filter_func=None):
  ''' Recursively get all files in the given path '''
file_list = []
if os.access(path, os.R_OK):
for p in os.listdir(path):
p = os.path.join(path, p)
if os.path.isdir(p):
file_list += get_all_files(p, filter_func)
else:
if filter_func is not None and not filter_func(p):
continue
# remove dump files of Mac
if '.DS_Store' in p or '.DS_STORE' in p or \
'._' == os.path.basename(p)[:2]:
continue
file_list.append(p)
return file_list
def get_all_ext(path):
  """ Recursively get all extensions of files in the given path
Parameters
----------
path : str
input folder
"""
file_list = []
if os.access(path, os.R_OK):
for p in os.listdir(path):
p = os.path.join(path, p)
if os.path.isdir(p):
file_list += get_all_ext(p)
else:
# remove dump files of Mac
if '.DS_Store' in p or '.DS_STORE' in p or \
'._' == os.path.basename(p)[:2]:
continue
ext = p.split('.')
if len(ext) > 1:
file_list.append(ext[-1])
file_list = list(set(file_list))
return file_list
def folder2bin(path):
""" This function read all files within a Folder
in binary mode,
then, store all the data in a dictionary mapping:
`relative_path -> binary_data`
"""
if not os.path.isdir(path):
raise ValueError('`path`=%s must be a directory.' % path)
path = os.path.abspath(path)
files = get_all_files(path)
data = {}
for f in files:
name = f.replace(path + '/', '')
with open(f, 'rb') as f:
data[name] = f.read()
return data
def bin2folder(data, path, override=False):
""" Convert serialized data from `folder2bin` back
to a folder at `path`
Parameters
----------
data: {string, dict}
if string, `data` can be pickled string, or path to a file.
if dict, `data` is the output from `folder2bin`
path: string
path to a folder
override: bool
if True, override exist folder at `path`
"""
# ====== check input ====== #
if is_string(data):
if os.path.isfile(data):
with open(data, 'rb') as f:
        data = cPickle.load(f)
else:
      data = cPickle.loads(data)
if not isinstance(data, dict):
raise ValueError(
"`data` must be dictionary type, or string, or path to file.")
# ====== check outpath ====== #
path = os.path.abspath(str(path))
if not os.path.exists(path):
os.mkdir(path)
elif os.path.isfile(path):
raise ValueError("`path` must be path to a directory.")
elif os.path.isdir(path):
if not override:
      raise RuntimeError("Folder at path:%s exists, cannot override." % path)
shutil.rmtree(path)
os.mkdir(path)
# ====== deserialize ====== #
for name, dat in data.items():
with open(os.path.join(path, name), 'wb') as f:
f.write(dat)
return path
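# A hedged round-trip sketch for the two helpers above; both paths are
# hypothetical placeholders.
def _example_folder_roundtrip():
  data = folder2bin('/tmp/model_dir')                 # {relative_path: bytes}
  return bin2folder(data, '/tmp/model_copy', override=True)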
# ===========================================================================
# String processing
# ===========================================================================
_space_char = re.compile(r"\s")
_multiple_spaces = re.compile(r"\s\s+")
_non_alphanumeric_char = re.compile(r"\W")
def string_normalize(text,
lower=True,
remove_non_alphanumeric=True,
remove_duplicated_spaces=True,
remove_whitespace=False,
escape_pattern=False):
text = str(text).strip()
if bool(lower):
text = text.lower()
if bool(escape_pattern):
text = re.escape(text)
if bool(remove_non_alphanumeric):
text = _non_alphanumeric_char.sub(' ', text)
text = text.strip()
if bool(remove_duplicated_spaces):
text = _multiple_spaces.sub(' ', text)
if bool(remove_whitespace):
if isinstance(remove_whitespace, string_types):
text = _space_char.sub(remove_whitespace, text)
else:
text = _space_char.sub('', text)
return text
text_normalize = string_normalize
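# A couple of illustrative calls of ``string_normalize`` (added for clarity).
def _example_string_normalize():
  assert string_normalize("  Hello,   World! ") == "hello world"
  assert string_normalize("a b", remove_whitespace='_') == "a_b"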
# ===========================================================================
# List utils
# ===========================================================================
def unique(seq, keep_order=False):
if keep_order:
seen = set()
seen_add = seen.add
return [x for x in seq if x not in seen and not seen_add(x)]
else:
return list(set(seq))
# ===========================================================================
# Async file IO
# ===========================================================================
# TODO
# ===========================================================================
# Object and class attributes
# ===========================================================================
class abstractclassmethod(classmethod):
__isabstractmethod__ = True
def __init__(self, method):
method.__isabstractmethod__ = True
super(abstractclassmethod, self).__init__(method)
class classproperty(object):
def __init__(self, fn):
super(classproperty, self).__init__()
self.fn = fn
def __get__(self, obj, owner):
return self.fn(owner)
# ===========================================================================
# Path utils
# ===========================================================================
def select_path(*paths, default=None, create_new=False):
"""
Parameters
----------
paths : str
multiple path are given
default : str
default path for return
create_new : bool (default: False)
    if no path is found, create a new folder based on the
    first path that can be created
"""
all_paths = []
for p in paths:
if isinstance(p, (tuple, list)):
all_paths += p
elif isinstance(p, string_types):
all_paths.append(p)
else:
      raise ValueError("Given `path` has type: '%s', which must be a string "
                       "or a list of strings" % type(p).__name__)
# ====== return the first found exists path ====== #
for p in all_paths:
if os.path.exists(p):
return p
# ====== check if create_new ====== #
if default is not None:
return str(default)
if create_new:
for p in paths:
base_dir = os.path.dirname(p)
if os.path.exists(base_dir):
os.mkdir(p)
return p
raise ValueError("Cannot create new folder from list: %s" % str(paths))
# ====== raise exception ====== #
raise RuntimeError("Cannot find any exists path from list: %s" %
'; '.join(all_paths))
# ===========================================================================
# Warnings and Exception
# ===========================================================================
@contextmanager
def catch_warnings_error(w):
  """ This method turns any given warnings into exceptions.
  Use `warnings.Warning` to match all warnings.
Example
-------
  >>> with catch_warnings_error(RuntimeWarning):
>>> try:
>>> warnings.warn('test', category=RuntimeWarning)
>>> except RuntimeWarning as w:
>>> pass
"""
with warnings.catch_warnings():
warnings.filterwarnings(action='error', category=w)
yield
@contextmanager
def catch_warnings_ignore(w):
  """ This method ignores any given warnings.
  Use `warnings.Warning` to match all warnings.
"""
with warnings.catch_warnings():
warnings.filterwarnings(action='ignore', category=w)
yield
| {
"repo_name": "imito/odin",
"path": "odin/utils/python_utils.py",
"copies": "1",
"size": "17126",
"license": "mit",
"hash": 8634052138890086000,
"line_mean": 26.8471544715,
"line_max": 79,
"alpha_frac": 0.5446105337,
"autogenerated": false,
"ratio": 3.86329799233025,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.490790852603025,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import inspect
import json
import sys
import weakref
from collections import namedtuple
from copy import copy
from functools import partial, wraps
from decorator import decorator as decorator
from .compat.contextlib import suppress
from .exceptions import KeyInvalidError
from .log import logger
__all__ = ()
FullArgSpec = namedtuple(
'FullArgSpec',
['args', 'varargs', 'varkw', 'defaults', 'kwonlyargs', 'kwonlydefaults',
'annotations'])
NormalizedArgs = namedtuple(
'NormalizedArgs',
['varargs', 'normargs', 'callargs'])
CachedCallInfo = namedtuple(
'CachedCallInfo',
['varargs', 'callargs', 'return_value', 'expiration_date'])
PrunedFilesInfo = namedtuple('PrunedFilesInfo', ['size', 'num'])
def fullargspec_from_argspec(argspec):
return FullArgSpec(
*argspec, kwonlyargs=[], kwonlydefaults=None, annotations={})
class DecoratorFactory(object):
"""Produce decorator to wrap a function with a bucket.
The decorator function is returned because using a class breaks
help(instance). See http://stackoverflow.com/a/25973438/2093785
"""
def __init__(self, bucket, method=False, nocache=None, callback=None,
ignore=None):
self.bucket = bucket
self.method = method
self.nocache = nocache
self.callback = callback
self.fref = None
self.property = False
if ignore is None:
ignore = ()
self.ignore = ignore
def decorate(self, f):
if isinstance(f, property):
f = f.fget
self.property = True
# method=True can be excluded when decorating a property because
# it's detectable. Set it now.
self.method = True
self.fref = weakref.ref(f)
# Try and use getargspec() first so that cache will work on source
# compatible with Python 2 and 3.
try:
argspec = inspect.getargspec(f)
argspec = fullargspec_from_argspec(argspec)
all_args = set(argspec.args)
except ValueError:
argspec = inspect.getfullargspec(f)
all_args = set(argspec.args)
all_args.update(argspec.kwonlyargs)
if self.nocache:
if self.nocache not in all_args:
                raise TypeError("nocache decorator argument '{}' "
                                "missing from argspec.".format(self.nocache))
test_set = set(self.ignore)
# *args and **kwargs can be ignored too
if argspec.varargs in self.ignore:
test_set -= set([argspec.varargs])
if argspec.varkw in self.ignore:
test_set -= set([argspec.varkw])
raise_invalid_keys(all_args, test_set,
message='parameter{s} cannot be ignored if not '
'present in argspec: {keys}')
fsig = (f.__name__, argspec._asdict())
def load_or_call(f, key_hash, args, kwargs, varargs, callargs):
"""Load function result from cache, or call function and cache
result.
args and kwargs are used to call original function.
varargs and callargs are used to call callback.
"""
skip_cache = False
if self.nocache:
skip_cache = callargs[self.nocache]
def call_and_cache():
logger.info('Calling function {}', f)
res = f(*args, **kwargs)
obj = self.bucket._update_or_make_obj_with_hash(key_hash, res)
self.bucket._set_obj_with_hash(key_hash, obj)
return res
called = False
if skip_cache:
result = call_and_cache()
called = True
else:
try:
obj = self.bucket._get_obj_from_hash(key_hash)
result = obj.value
except KeyInvalidError:
result = call_and_cache()
called = True
else:
logger.info('Function call loaded from cache: {}', f)
if self.callback:
callinfo = CachedCallInfo(varargs, callargs, result,
obj.expiration_date)
if self.method:
instance = callargs[argspec.args[0]]
self.callback(instance, callinfo)
else:
self.callback(callinfo)
return result, called
def wrapper(f, *args, **kwargs):
normalized_args = normalize_args(f, *args, **kwargs)
varargs, normargs, callargs = normalized_args
sig_normargs = normargs.copy()
sig_varargs = copy(varargs)
# Delete nocache parameter from call arg used for signature.
if self.nocache:
del sig_normargs[self.nocache]
for arg in self.ignore:
if arg == argspec.varargs:
sig_varargs = ()
elif arg == argspec.varkw:
for kwarg in callargs[argspec.varkw]:
del sig_normargs[kwarg]
else:
del sig_normargs[arg]
if self.method:
instance = args[0]
# Delete instance parameter from call arg used for signature.
del sig_normargs[argspec.args[0]]
sig_instance = get_instance_signature(instance)
signature = (sig_instance, fsig, sig_varargs, sig_normargs)
else:
signature = (fsig, sig_varargs, sig_normargs)
# Make key_hash before function call, and raise error
# if state changes (hash is different) afterwards.
key_hash = self.bucket._hash_for_key(signature)
ret, called = load_or_call(f, key_hash, args, kwargs, varargs, callargs)
if called:
post_key_hash = self.bucket._hash_for_key(signature)
if key_hash != post_key_hash:
optional = ''
if self.method:
optional = ' or instance state'
raise ValueError(
"modification of input parameters{} by function"
" '{}' cannot be cached.".format(optional, f.__name__))
return ret
new_function = decorator(wrapper, f)
new_function.callback = self.add_callback
if self.property:
new_function = property(new_function)
return new_function
def add_callback(self, f):
"""Magic method assigned to f.callback that allows a callback to be
defined as follows:
@bucket
def fun(...):
...
@fun.callback
def fun():
...
In the event that a cached result is used, the callback is fired.
"""
self.callback = f
return self.decorate(self.fref())
def get_instance_signature(instance):
"""Get state of instance for cache signature (as part of key).
Attempts to get state will be done in this order:
- instance._getsate_bucketcache_()
- instance.__getstate__()
- instance.__dict__
"""
with suppress(AttributeError):
return instance._getsate_bucketcache_()
with suppress(AttributeError):
return instance.__getstate__()
return instance.__dict__
def normalize_args(f, *args, **kwargs):
"""Normalize call arguments into keyword form and varargs.
args can only be non-empty if there is *args in the argument specification.
"""
callargs = inspect.getcallargs(f, *args, **kwargs)
original_callargs = callargs.copy()
try:
argspec = inspect.getargspec(f)
except ValueError:
argspec = inspect.getfullargspec(f)
else:
argspec = fullargspec_from_argspec(argspec)
if hasattr(argspec, 'varkw'):
if argspec.varkw:
kwargs = callargs.pop(argspec.varkw, {})
callargs.update(kwargs)
if argspec.varargs:
varargs = callargs.pop(argspec.varargs, ())
else:
varargs = ()
# now callargs is all keywords
return NormalizedArgs(varargs=varargs,
normargs=callargs,
callargs=original_callargs)
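# Illustrative sketch (not part of this module): `normalize_args` folds every
# argument into keyword form; `area` below is a hypothetical function used only
# for demonstration.
#   >>> def area(width, height=1, *sizes, **opts):
#   ...     return width * height
#   >>> normalize_args(area, 3, 4, 5, scale=2)
#   NormalizedArgs(varargs=(5,),
#                  normargs={'width': 3, 'height': 4, 'scale': 2},
#                  callargs={'width': 3, 'height': 4, 'sizes': (5,), 'opts': {'scale': 2}})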
def raise_invalid_keys(valid_keys, passed_keys, message=None):
if message is None:
message = 'Invalid keyword argument{s}: {keys}'
if not passed_keys <= valid_keys:
invalid_keys = passed_keys - valid_keys
raise_keys(invalid_keys, message=message)
def raise_keys(keys, message):
invalid_str = ', '.join(keys)
s = 's' if len(keys) > 1 else ''
raise TypeError(message.format(s=s, keys=invalid_str))
| {
"repo_name": "RazerM/bucketcache",
"path": "bucketcache/utilities.py",
"copies": "1",
"size": "8971",
"license": "mit",
"hash": 6003223853698706000,
"line_mean": 31.6218181818,
"line_max": 84,
"alpha_frac": 0.5601382232,
"autogenerated": false,
"ratio": 4.46318407960199,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.000042780748663101606,
"num_lines": 275
} |
from __future__ import absolute_import, division, print_function
import inspect
import os
from collections import Mapping
from contextlib import contextmanager
from six import add_metaclass
from six.moves import builtins, cPickle
from odin.backend import (keras_callbacks, keras_helpers, losses, metrics,
tf_utils)
from odin.backend.alias import *
from odin.backend.maths import *
from odin.backend.tensor import *
from odin.utils import as_tuple, is_path, is_string
# ===========================================================================
# Make the layers accessible through backend
# ===========================================================================
class _nn_meta(type):
def __getattr__(cls, key):
fw = get_framework()
import torch
import tensorflow as tf
all_objects = {}
if fw == torch:
from odin import networks_torch
all_objects.update(torch.nn.__dict__)
all_objects.update(networks_torch.__dict__)
elif fw == tf:
from odin import networks
all_objects.update(tf.keras.layers.__dict__)
all_objects.update(networks.__dict__)
else:
raise NotImplementedError(str(fw))
return all_objects[key]
@add_metaclass(_nn_meta)
class nn:
pass
| {
"repo_name": "imito/odin",
"path": "odin/backend/__init__.py",
"copies": "1",
"size": "1264",
"license": "mit",
"hash": -6411751419726807000,
"line_mean": 27.0888888889,
"line_max": 77,
"alpha_frac": 0.6083860759,
"autogenerated": false,
"ratio": 4.299319727891157,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5407705803791156,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import inspect
import os
from typing import Text, Type, Union
import numpy as np
import tensorflow as tf
import tensorflow_addons as tfa
import torch
from six import string_types
from tensorflow.python import keras
from odin.backend.maths import (moments, reduce_max, reduce_mean, reduce_min,
reduce_sum, sqrt)
from odin.backend.tensor import concatenate
__all__ = [
'parse_activation', 'parse_attention', 'parse_constraint',
'parse_initializer', 'parse_normalizer', 'parse_regularizer',
'parse_optimizer', 'parse_reduction'
]
# ===========================================================================
# Helper
# ===========================================================================
def _linear_function(x):
return x
def _invalid(msg, obj):
if isinstance(obj, string_types):
pass
elif obj is None:
obj = 'None'
else:
obj = str(type(obj))
raise ValueError("%s, given type: %s" % (msg, obj))
def _is_tensorflow(framework):
if isinstance(framework, string_types):
framework = framework.lower()
if any(i in framework for i in ('tf', 'tensorflow', 'tensor')):
return True
if not inspect.isclass(framework):
framework = type(framework)
cls_desc = str(framework) + ''.join(str(i) for i in type.mro(framework))
if 'tensorflow' in cls_desc:
return True
return False
# ===========================================================================
# Network basics
# ===========================================================================
def parse_activation(activation, framework):
"""
Parameters
----------
activation : `str`
alias for activation function
framework : `str`
'tensorflow' or 'pytorch'
"""
if activation is None:
activation = 'linear'
if callable(activation):
return activation
if isinstance(activation, string_types):
if activation.lower() == 'linear':
return _linear_function
if _is_tensorflow(framework):
return keras.activations.get(activation)
else:
for i in dir(torch.nn.functional):
if i.lower() == activation.lower():
fn = getattr(torch.nn.functional, i)
if inspect.isfunction(fn):
return fn
_invalid("No support for activation", activation)
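# Illustrative sketch (not part of this module): the same alias resolves to a
# framework-specific callable.
#   >>> parse_activation('relu', 'tensorflow')  # -> keras.activations.relu
#   >>> parse_activation('relu', 'pytorch')     # -> torch.nn.functional.relu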
def parse_initializer(initializer, framework):
if _is_tensorflow(framework):
return keras.initializers.get(initializer)
else:
if callable(initializer):
return initializer
if isinstance(initializer, string_types):
initializer = initializer.lower().replace('glorot_', 'xavier_').replace(
'he_', 'kaiming_')
for i in dir(torch.nn.init):
if i.lower() == initializer.lower() + '_':
fn = getattr(torch.nn.init, i)
if inspect.isfunction(fn):
return fn
_invalid("No support for initializer", initializer)
def parse_optimizer(optimizer, framework) -> Type:
""" Return the class for given optimizer alias """
if _is_tensorflow(framework):
all_classes = {
'adadelta': keras.optimizers.adadelta_v2.Adadelta,
'adagrad': keras.optimizers.adagrad_v2.Adagrad,
'adam': keras.optimizers.adam_v2.Adam,
'adamax': keras.optimizers.adamax_v2.Adamax,
'nadam': keras.optimizers.nadam_v2.Nadam,
'rmsprop': keras.optimizers.rmsprop_v2.RMSprop,
'sgd': keras.optimizers.gradient_descent_v2.SGD,
}
else:
all_classes = {
'adadelta': torch.optim.Adadelta,
'adagrad': torch.optim.Adagrad,
'adam': torch.optim.Adam,
'adamax': torch.optim.Adamax,
'nadam': None,
'rmsprop': torch.optim.RMSprop,
'sgd': torch.optim.SGD,
}
opt = all_classes.get(str(optimizer).lower(), None)
if opt is not None:
return opt
_invalid("No support for optimizer", optimizer)
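# Illustrative sketch (not part of this module): the alias maps to an optimizer
# class, which is then instantiated by the caller; `model` below is a
# hypothetical nn.Module.
#   >>> Opt = parse_optimizer('adam', framework='pytorch')  # -> torch.optim.Adam
#   >>> opt = Opt(model.parameters(), lr=1e-3)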
def parse_regularizer(regularizer, framework):
if regularizer is None:
return None
if _is_tensorflow(framework):
return keras.regularizers.get(regularizer)
else:
pass
_invalid("No support for regularizer", regularizer)
def parse_constraint(constraint, framework):
if constraint is None:
return None
if _is_tensorflow(framework):
return keras.constraints.get(constraint)
else:
pass
_invalid("No support for constraint", constraint)
# ===========================================================================
# Layers
# ===========================================================================
def parse_reduction(reduce: Text, framework=None):
""" Return a reduce function """
if reduce is None:
reduce = 'none'
if isinstance(reduce, string_types):
if "min" in reduce:
return reduce_min
if "max" in reduce:
return reduce_max
if "avg" in reduce or "mean" in reduce:
return reduce_mean
if "sum" in reduce:
return reduce_sum
if "none" in reduce or reduce == "":
return lambda x, *args, **kwargs: x
if "stat" in reduce:
def stat_reduce(x, axis=None, keepdims=None):
m, v = moments(x, axis=axis, keepdims=keepdims)
return concatenate([m, sqrt(v)], axis=-1)
return stat_reduce
_invalid("No support for reduce", reduce)
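# Illustrative sketch (not part of this module): reduction aliases map to the
# backend reduction functions imported above.
#   >>> reduce_fn = parse_reduction('mean')  # -> odin.backend.maths.reduce_mean
#   >>> stat_fn = parse_reduction('stat')    # -> concatenated [mean, stddev]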
def parse_attention(attention, framework):
pass
def parse_normalizer(normalizer, framework):
if _is_tensorflow(framework):
if isinstance(normalizer, string_types):
normalizer = normalizer.strip().lower()
if normalizer == 'batchnorm':
return keras.layers.BatchNormalization
elif normalizer == 'batchrenorm':
from odin.networks.util_layers import BatchRenormalization
        return BatchRenormalization
elif normalizer == 'layernorm':
return keras.layers.LayerNormalization
elif normalizer == 'instancenorm':
return tfa.layers.InstanceNormalization
elif normalizer == 'groupnorm':
return tfa.layers.GroupNormalization
else:
pass
_invalid("No support for normalizer", normalizer)
def parse_layer(layer, framework):
pass
# ===========================================================================
# Loss and metric
# ===========================================================================
def parse_loss(loss, framework):
pass
def parse_metric(loss, framework):
pass
| {
"repo_name": "imito/odin",
"path": "odin/backend/alias.py",
"copies": "1",
"size": "6350",
"license": "mit",
"hash": -8952101936298481000,
"line_mean": 28.3981481481,
"line_max": 78,
"alpha_frac": 0.5982677165,
"autogenerated": false,
"ratio": 4.126055880441846,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5224323596941846,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import inspect
import os
import re
from abc import ABCMeta, abstractmethod
from collections import Mapping
from enum import Enum
import numpy as np
from six import add_metaclass, string_types
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.pipeline import Pipeline
from sklearn.pipeline import make_pipeline as _make_pipeline
from odin.fuel import Dataset
from odin.preprocessing.signal import delta, mvn, stack_frames
from odin.utils import (as_tuple, ctext, dummy_formatter, flatten_list,
get_all_files, get_formatted_datetime, is_pickleable,
is_string)
class ExtractorSignal(object):
""" ExtractorSignal """
def __init__(self):
super(ExtractorSignal, self).__init__()
self._timestamp = get_formatted_datetime(only_number=False)
self._extractor = None
self._msg = ''
self._action = 'ignore'
self._last_input = {}
@property
def message(self):
return self._msg
@property
def action(self):
return self._action
def set_message(self, extractor, msg, last_input):
if self._extractor is not None:
raise RuntimeError(
"This signal has stored message, cannot set message twice.")
assert isinstance(extractor, Extractor), \
'`extractor` must be instance of odin.preprocessing.base.Extractor, ' +\
'but given type: %s' % str(type(extractor))
self._extractor = extractor
self._msg = str(msg)
self._last_input = last_input
return self
def set_action(self, action):
action = str(action).lower()
assert action in ('warn', 'error', 'ignore'), \
"`action` can be one of the following values: 'warn', 'error', 'ignore'; " + \
"but given: %s" % action
self._action = action
return self
def __str__(self):
if self._extractor is None:
raise RuntimeError("The Signal has not been configured by the Extractor")
s = '[%s]' % self._timestamp
s += '%s' % ctext(self._extractor.__class__.__name__, 'cyan') + '\n'
s += 'Error message: "%s"' % ctext(self._msg, 'yellow') + '\n'
s += 'Action: "%s"' % ctext(self._action, 'yellow') + '\n'
# last input
s += 'Last input: \n'
if isinstance(self._last_input, Mapping):
for k, v in sorted(self._last_input.items(), key=lambda x: x[0]):
s += ' %s: %s\n' % (ctext(str(k), 'yellow'), dummy_formatter(v))
else:
s += ' Type: %s\n' % ctext(type(self._last_input), 'yellow')
s += ' Object: %s\n' % ctext(str(self._last_input), 'yellow')
# parameters
s += 'Attributes: \n'
s += ' ' + ctext('InputLayer', 'yellow') + ': ' + str(
self._extractor.is_input_layer) + '\n'
s += ' ' + ctext('RobustLevel',
'yellow') + ': ' + self._extractor.robust_level + '\n'
s += ' ' + ctext('InputName', 'yellow') + ': ' + str(
self._extractor.input_name) + '\n'
s += ' ' + ctext('OutputName', 'yellow') + ': ' + str(
self._extractor.output_name) + '\n'
for name, param in self._extractor.get_params().items():
if name not in ('_input_name', '_output_name'):
s += ' ' + ctext(name, 'yellow') + ': ' + dummy_formatter(param) + '\n'
return s
# ===========================================================================
# Helper
# ===========================================================================
def make_pipeline(steps, debug=False):
  """ NOTE: this method automatically removes None entries,
  - flattens any list or dictionary found in `steps`, and
  - removes any object that is not an instance of `Extractor`
  during the creation of the `Pipeline`.
"""
ID = [0]
def item2step(x):
if isinstance(x, (tuple, list)):
if len(x) == 1 and isinstance(x[0], Extractor):
x = x[0]
ID[0] += 1
return (x.__class__.__name__ + str(ID[0]), x)
elif len(x) == 2:
if is_string(x[0]) and isinstance(x[1], Extractor):
return x
elif is_string(x[1]) and isinstance(x[0], Extractor):
return (x[1], x[0])
elif isinstance(x, Extractor):
ID[0] += 1
return (x.__class__.__name__ + str(ID[0]), x)
return None
if isinstance(steps, Mapping):
steps = steps.items()
elif not isinstance(steps, (tuple, list)):
steps = [steps]
steps = [item2step(i) for i in steps]
# remove None
steps = [s for s in steps if s is not None]
if len(steps) == 0:
raise ValueError(
"No instance of odin.preprocessing.base.Extractor found in `steps`.")
# ====== set debug mode ====== #
set_extractor_debug([i[1] for i in steps], debug=bool(debug))
# ====== return pipeline ====== #
ret = Pipeline(steps=steps)
return ret
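# Illustrative sketch (not part of this module): build a small preprocessing
# pipeline from extractors defined later in this file; the feature names and
# shapes below are hypothetical.
#   >>> pp = make_pipeline(steps=[
#   ...     RenameFeatures(input_name='spec', output_name='spectrogram'),
#   ...     AsType(dtype='float32'),
#   ... ], debug=True)
#   >>> feats = pp.transform({'spec': np.random.rand(100, 40), 'name': 'utt1'})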
def set_extractor_debug(extractors, debug):
# ====== prepare ====== #
if isinstance(extractors, (tuple, list)):
extractors = [
i for i in flatten_list(extractors) if isinstance(i, Extractor)
]
elif isinstance(extractors, Pipeline):
extractors = [i[-1] for i in extractors.steps]
elif isinstance(extractors, Mapping):
extractors = [i[-1] for i in extractors.items()]
else:
raise ValueError("No support for `extractors` type: %s" % type(extractors))
# ====== set the value ====== #
for i in extractors:
i._debug = bool(debug)
return extractors
def _equal_inputs_outputs(x, y):
try:
if x != y:
return False
except Exception:
pass
return True
def _preprocess(x):
if isinstance(x, np.str_):
x = str(x)
return x
# ===========================================================================
# Basic extractors
# ===========================================================================
class Extractor(BaseEstimator, TransformerMixin):
""" Extractor
  The developer must override the `_transform` method:
  - If the return value is an instance of `collections.Mapping`, the new
  features are merged into the existing features dictionary (i.e. the input
  dictionary).
  - If the return value is not an instance of `Mapping`, the `output_name`
  (by default the lower-cased class name) is used to name the returned features.
  - If `None` is returned, an `ExtractorSignal` is propagated through the rest
  of the pipeline (i.e. `None` acts as a terminal signal).
Parameters
----------
input_name : {None, string, list of string}
    list of strings representing the names of the input features
  output_name : {None, string, list of string}
    default name for the output feature (in case the return value is not
    an instance of a dictionary);
    If `input_name` is None and `output_name` is None, the lower-cased
    class name is used as the default
is_input_layer : bool (default: False)
An input layer accept any type of input to `transform`,
otherwise, only accept a dictionary type as input.
robust_level : {'ignore', 'warn', 'error'}
'ignore' - ignore error files
'warn' - warn about error file during processing
'error' - raise Exception and stop processing
"""
def __init__(self,
input_name=None,
output_name=None,
is_input_layer=False,
robust_level='ignore',
name=None):
super(Extractor, self).__init__()
if name is None:
      self._name = "%s%d" % (self.__class__.__name__, np.random.randint(0, 888888))
else:
self._name = str(name)
self._debug = False
self._is_input_layer = bool(is_input_layer)
self._last_debugging_text = ''
# ====== robust level ====== #
robust_level = str(robust_level).lower()
assert robust_level in ('ignore', 'warn', 'error'),\
"`robust_level` can be one of the following values: " + \
"'warn', 'error', 'ignore'; but given: %s" % robust_level
self._robust_level = robust_level
# ====== check input_name ====== #
if input_name is None:
pass
elif isinstance(input_name, string_types):
pass
elif hasattr(input_name, '__iter__'):
input_name = tuple([str(i).lower() for i in input_name])
else:
raise ValueError("No support for `input_name` type: %s" %
str(type(input_name)))
self._input_name = input_name
# ====== check output_name ====== #
if output_name is None:
if input_name is None:
output_name = self.__class__.__name__.lower()
else:
output_name = input_name
elif isinstance(output_name, string_types):
pass
elif hasattr(output_name, '__iter__'):
output_name = tuple([str(i).lower() for i in output_name])
else:
raise ValueError("No support for `output_name` type: %s" %
str(type(output_name)))
self._output_name = output_name
@property
def name(self):
return self._name
@property
def last_debugging_text(self):
""" Return the last debugging information recorded during
calling the `transform` method with `debug=True` """
if not hasattr(self, '_last_debugging_text'):
self._last_debugging_text = ''
return self._last_debugging_text
@property
def input_name(self):
return self._input_name
@property
def output_name(self):
return self._output_name
@property
def is_input_layer(self):
return self._is_input_layer
@property
def robust_level(self):
return self._robust_level
def set_debug(self, debug):
self._debug = bool(debug)
return self
def fit(self, X, y=None):
# Do nothing here
return self
def __call__(self, X):
return self.transform(X)
def _transform(self, X):
raise NotImplementedError
def transform(self, X):
# NOTE: do not override this method
if isinstance(X, ExtractorSignal):
return X
# ====== interpret different signal ====== #
if X is None:
return ExtractorSignal().set_message(
extractor=self,
msg="`None` value is returned by extractor",
last_input=X).set_action(self.robust_level)
# ====== input layer ====== #
if not self.is_input_layer and not isinstance(X, Mapping):
err_msg = "the input to `Extractor.transform` must be instance of dictionary, " + \
"but given type: %s" % str(type(X))
return ExtractorSignal().set_message(extractor=self,
msg=err_msg,
last_input=X).set_action(
self.robust_level)
# ====== the transformation ====== #
if self.input_name is not None and isinstance(X, Mapping):
for name in as_tuple(self.input_name, t=string_types):
if name not in X:
return ExtractorSignal().set_message(
extractor=self,
msg="Cannot find features with name: %s" % name,
last_input=X).set_action('error')
y = self._transform(X)
# if return Signal or None, no post-processing
if isinstance(y, ExtractorSignal):
return y
if y is None:
return ExtractorSignal().set_message(
extractor=self,
msg="`None` value is returned by the extractor: %s" %
self.__class__.__name__,
last_input=X).set_action(self.robust_level)
# ====== return type must always be a dictionary ====== #
if not isinstance(y, Mapping):
if isinstance(y, (tuple, list)):
y = {
i: j for i, j in zip(as_tuple(self.output_name, t=string_types), y)
}
else:
y = {self.output_name: y}
# ====== Merge previous results ====== #
# remove None values
tmp = {}
for name, feat in y.items():
if any(c.isupper() for c in name):
return ExtractorSignal().set_message(
extractor=self,
msg="Name for features cannot contain upper case",
last_input=X).set_action('error')
if feat is None:
continue
tmp[name] = feat
y = tmp
# add old features extracted in X, but do NOT override new features in y
if isinstance(X, Mapping):
for name, feat in X.items():
if any(c.isupper() for c in name):
return ExtractorSignal().set_message(
extractor=self,
msg="Name for features cannot contain upper case",
last_input=X).set_action('error')
if name not in y:
y[name] = _preprocess(feat)
# ====== print debug text ====== #
# maybe someone implement __getstate__ and forget _debug
if not hasattr(self, '_debug'):
self._debug = False
if self._debug:
debug_text = ''
debug_text += '%s %s\n' % (ctext(
"[Extractor]", 'cyan'), ctext(self.__class__.__name__, 'magenta'))
# inputs
if not _equal_inputs_outputs(X, y):
debug_text += ' %s\n' % ctext("Inputs:", 'yellow')
debug_text += ' %s\n' % ctext("-------", 'yellow')
if isinstance(X, Mapping):
for k, v in X.items():
debug_text += ' %s : %s\n' % (ctext(k,
'blue'), dummy_formatter(v))
else:
debug_text += ' %s\n' % dummy_formatter(X)
# outputs
debug_text += ' %s\n' % ctext("Outputs:", 'yellow')
debug_text += ' %s\n' % ctext("-------", 'yellow')
if isinstance(y, Mapping):
for k, v in y.items():
debug_text += ' %s : %s\n' % (ctext(k, 'blue'), dummy_formatter(v))
else:
debug_text += ' %s\n' % dummy_formatter(y)
# parameters
for name, param in self.get_params().items():
if name not in ('_input_name', '_output_name'):
debug_text += ' %s : %s\n' % (ctext(
name, 'yellow'), dummy_formatter(param))
self._last_debugging_text = debug_text
print(debug_text)
return y
# ===========================================================================
# General extractor
# ===========================================================================
class Converter(Extractor):
""" Convert the value under `input_name` to a new value
using `converter` function, and save the new value to
the `output_name`.
  This could be a 1 -> 1 or a many -> 1 mapping; in the case of
  a many-to-1 mapping, the `converter` function is
  called as `converter(*args)`.
Parameters
----------
converter: {Mapping, call-able}
convert `inputs['name'] = converter(inputs[keys])`
"""
def __init__(self, converter, input_name='name', output_name='name'):
super(Converter, self).__init__(input_name=as_tuple(input_name,
t=string_types),
output_name=str(output_name))
# ====== check converter ====== #
if not hasattr(converter, '__call__') and \
not isinstance(converter, Mapping):
      raise ValueError("`converter` must be call-able or a Mapping.")
# converter can be function or dictionary
self.converter = converter
def _transform(self, feat):
X = [feat[name] for name in self.input_name]
if hasattr(self.converter, '__call__'):
name = self.converter(*X)
else:
name = self.converter[X[0] if len(X) == 1 else X]
return {self.output_name: name}
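# Illustrative sketch (not part of this module): derive a speaker id from the
# utterance name; the naming convention below is hypothetical.
#   >>> spk = Converter(converter=lambda name: name.split('_')[0],
#   ...                 input_name='name', output_name='spkid')
#   >>> spk.transform({'name': 'spk01_utt001'})['spkid']
#   'spk01'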
class DeltaExtractor(Extractor):
""" Extracting the delta coefficients given the axis
Parameters
----------
input_name : list of str
    list of all feature names used for calculating the delta
  width : int
    number of frames taken into account for one delta
  order : list of int
    list of all delta orders to be concatenated (NOTE: keep `0` in
    the list if you want to keep the original features)
  axis : int
    the dimension along which the delta is calculated
    (the time dimension is suggested for acoustic features, i.e. axis=0)
"""
def __init__(self,
input_name,
output_name=None,
width=9,
order=(0, 1),
axis=0):
super(DeltaExtractor, self).__init__(input_name=as_tuple(input_name,
t=string_types),
output_name=output_name)
# ====== check width ====== #
width = int(width)
if width % 2 == 0 or width < 3:
raise ValueError("`width` must be odd integer >= 3, give value: %d" %
width)
self.width = width
# ====== check order ====== #
self.order = as_tuple(order, t=int)
# ====== axis ====== #
self.axis = axis
def _calc_deltas(self, X):
all_deltas = delta(data=X,
width=self.width,
order=max(self.order),
axis=self.axis)
if not isinstance(all_deltas, (tuple, list)):
all_deltas = (all_deltas,)
else:
all_deltas = tuple(all_deltas)
all_deltas = (X,) + all_deltas
all_deltas = tuple([d for i, d in enumerate(all_deltas) if i in self.order])
return np.concatenate(all_deltas, axis=-1)
def _transform(self, feat):
return [self._calc_deltas(feat[name]) for name in self.input_name]
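# Illustrative sketch (not part of this module): append first-order deltas to a
# hypothetical 20-dimensional 'mfcc' feature, doubling the feature dimension
# (assuming `delta` preserves the time dimension).
#   >>> de = DeltaExtractor(input_name='mfcc', width=9, order=(0, 1), axis=0)
#   >>> out = de.transform({'mfcc': np.random.rand(120, 20)})
#   >>> out['mfcc'].shape   # -> (120, 40)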
class EqualizeShape0(Extractor):
""" EqualizeShape0
  The final length of all features is the `minimum length` among them.
  This Extractor shrinks the first dimension of all given features in
  `input_name` to the same length.
  If a given feature is shorter than the desired length, `None` is returned
  (terminating the pipeline).
Parameters
----------
input_name: {None, list of string}
    list of feature names to be equalized.
    If None, equalize all `numpy.ndarray` features
    with `ndim` > 0.
shrink_mode: 'center', 'left', 'right'
center: remove data points from both left and right
left: remove data points at the beginning (left)
right: remove data points at the end (right)
"""
def __init__(self, input_name=None, shrink_mode='right'):
super(
EqualizeShape0,
self).__init__(input_name=as_tuple(input_name, t=string_types
) if input_name is not None else None)
shrink_mode = str(shrink_mode).lower()
if shrink_mode not in ('center', 'left', 'right'):
raise ValueError("shrink mode support include: center, left, right")
self.shrink_mode = shrink_mode
def _transform(self, feat):
if self.input_name is None:
X = []
output_name = []
for key, val in feat.items():
if isinstance(val, np.ndarray) and val.ndim > 0:
X.append(val)
output_name.append(key)
else:
X = [feat[name] for name in self.input_name]
output_name = self.input_name
# ====== searching for desire length ====== #
n = min(i.shape[0] for i in X)
# ====== equalize ====== #
equalized = {}
for name, y in zip(output_name, X):
# cut the features in left and right
# if the shape[0] is longer
if y.shape[0] != n:
diff = y.shape[0] - n
if diff < 0:
          print("Feature length: %d which is smaller "
                "than the desired length: %d, feature name is '%s'" %
                (y.shape[0], n, name))
return None
elif diff > 0:
if self.shrink_mode == 'center':
diff_left = diff // 2
diff_right = diff - diff_left
y = y[diff_left:-diff_right]
elif self.shrink_mode == 'right':
y = y[:-diff]
elif self.shrink_mode == 'left':
y = y[diff:]
equalized[name] = y
return equalized
class RunningStatistics(Extractor):
""" Running statistics
Parameters
----------
  input_name: {None, string, list of string}
    list of feature names used for calculating the
    running statistics.
    If None, calculate the statistics for all `numpy.ndarray`
    with `ndim` > 0
axis : int (default: 0)
the axis for calculating the statistics
  prefix : str (default: '')
    the prefix appended to 'sum1' and 'sum2'
"""
def __init__(self, input_name=None, axis=0, prefix=''):
super(
RunningStatistics,
self).__init__(input_name=as_tuple(input_name, t=string_types
) if input_name is not None else None)
self.axis = axis
self.prefix = str(prefix)
def get_sum1_name(self, feat_name):
return '%s_%ssum1' % (feat_name, self.prefix)
def get_sum2_name(self, feat_name):
return '%s_%ssum2' % (feat_name, self.prefix)
def _transform(self, feat):
if self.input_name is None:
X = []
output_name = []
for key, val in feat.items():
if isinstance(val, np.ndarray) and val.ndim > 0:
X.append(val)
output_name.append(key)
else:
X = [feat[name] for name in self.input_name]
output_name = self.input_name
# ====== calculate the statistics ====== #
for name, y in zip(output_name, X):
# ====== SUM of x^1 ====== #
sum1 = np.sum(y, axis=self.axis, dtype='float64')
s1_name = self.get_sum1_name(name)
if s1_name not in feat:
feat[s1_name] = sum1
else:
feat[s1_name] += sum1
# ====== SUM of x^2 ====== #
sum2 = np.sum(np.power(y, 2), axis=self.axis, dtype='float64')
s2_name = self.get_sum2_name(name)
if s2_name not in feat:
feat[s2_name] = sum2
else:
feat[s2_name] += sum2
return feat
class AsType(Extractor):
""" An extractor convert given features to given types
Parameters
----------
dtype : {string, numpy.dtype}
desire type
  input_name: {None, string, list of string}
    list of feature names to be converted.
    If None, convert every object that has an
    `astype` method.
exclude_pattern : {string, None}
regular expression pattern to exclude all features with given
name pattern, only used when `input_name=None`.
By default, exclude all running statistics '*_sum1' and '*_sum2'
"""
  def __init__(self, dtype, input_name=None, exclude_pattern=r".+_sum[12]"):
super(
AsType,
self).__init__(input_name=as_tuple(input_name, t=string_types
) if input_name is not None else None)
self.dtype = np.dtype(dtype)
if isinstance(exclude_pattern, string_types):
exclude_pattern = re.compile(exclude_pattern)
else:
exclude_pattern = None
self.exclude_pattern = exclude_pattern
def _transform(self, feat):
# ====== preprocessing ====== #
if self.input_name is None:
X = []
output_name = []
for key, val in feat.items():
if hasattr(val, 'astype'):
if self.exclude_pattern is not None and \
self.exclude_pattern.search(key):
continue
X.append(val)
output_name.append(key)
else:
X = [feat[name] for name in self.input_name]
output_name = self.input_name
# ====== astype ====== #
updates = {}
for name, y in zip(output_name, X):
updates[name] = y.astype(self.dtype)
return updates
class DuplicateFeatures(Extractor):
def __init__(self, input_name, output_name):
super(DuplicateFeatures,
self).__init__(input_name=as_tuple(input_name, t=string_types),
output_name=as_tuple(output_name, t=string_types))
def _transform(self, feat):
return {
out_name: feat[in_name]
for in_name, out_name in zip(self.input_name, self.output_name)
}
class RenameFeatures(Extractor):
def __init__(self, input_name, output_name):
super(RenameFeatures, self).__init__(input_name=as_tuple(input_name,
t=string_types),
output_name=as_tuple(output_name,
t=string_types))
def _transform(self, X):
return X
def transform(self, feat):
if isinstance(feat, Mapping):
for old_name, new_name in zip(self.input_name, self.output_name):
if old_name in feat: # only remove if it exist
X = feat[old_name]
del feat[old_name]
feat[new_name] = X
return feat
class DeleteFeatures(Extractor):
""" Remove features by name from extracted features dictionary """
def __init__(self, input_name):
super(DeleteFeatures, self).__init__()
self._name = as_tuple(input_name, t=string_types)
def _transform(self, X):
return X
def transform(self, feat):
if isinstance(feat, Mapping):
for name in self._name:
if name in feat: # only remove if it exist
del feat[name]
return feat
# ===========================================================================
# Shape
# ===========================================================================
class StackFeatures(Extractor):
  """ Stack context (or splice multiple frames) into a
  single vector.
  Parameters
  ----------
  n_context : int
    number of context frames on the left and right; the final
    number of stacked frames is `n_context * 2 + 1`
    NOTE: the stacking process ignores `n_context` frames at the
    beginning (on the left) and at the end (on the right).
  input_name : {None, list of string}
    list of feature names to be stacked.
    If None, stack all `numpy.ndarray` features.
"""
def __init__(self, n_context, input_name=None):
super(
StackFeatures,
self).__init__(input_name=as_tuple(input_name, t=string_types
) if input_name is not None else None)
self.n_context = int(n_context)
assert self.n_context > 0
def _transform(self, feat):
if self.input_name is None:
X = []
output_name = []
for key, val in feat.items():
if isinstance(val, np.ndarray) and val.ndim > 0:
X.append(val)
output_name.append(key)
else:
X = [feat[name] for name in self.input_name]
output_name = self.input_name
# ====== this ====== #
for name, y in zip(output_name, X):
# stacking the context frames
y = stack_frames(y,
frame_length=self.n_context * 2 + 1,
step_length=1,
keep_length=True,
make_contigous=True)
feat[name] = y
return feat
| {
"repo_name": "imito/odin",
"path": "odin/preprocessing/base.py",
"copies": "1",
"size": "26283",
"license": "mit",
"hash": -7711228074606720000,
"line_mean": 33.0453367876,
"line_max": 89,
"alpha_frac": 0.5666019861,
"autogenerated": false,
"ratio": 3.835254633007442,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4901856619107442,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import inspect
import os
import numpy as np
import torch
from six import add_metaclass
from tensorflow.python.keras.utils import conv_utils
from torch import nn
from torch.nn import functional
from odin.backend import (parse_activation, parse_constraint, parse_initializer,
parse_regularizer)
class Layer(nn.Module):
def __init__(self, **kwargs):
super(Layer, self).__init__()
self.built = False
def build(self, input_shape):
"""Creates the variables of the layer (optional, for subclass implementers).
This is a method that implementers of subclasses of `Layer` or `Model`
can override if they need a state-creation step in-between
layer instantiation and layer call.
This is typically used to create the weights of `Layer` subclasses.
Arguments:
input_shape: Instance of `TensorShape`, or list of instances of
`TensorShape` if the layer expects a list of inputs
(one instance per input).
"""
self.built = True
def forward(self, *inputs, **kwargs):
n_inputs = len(inputs)
input_shape = [i.shape for i in inputs]
if n_inputs == 1:
input_shape = input_shape[0]
if not self.built:
self.build(input_shape)
# call
inputs = inputs[0] if n_inputs == 1 else inputs
    # this makes life easier, but it is not a general solution
if isinstance(inputs, np.ndarray):
inputs = torch.Tensor(inputs)
# intelligent call
specs = inspect.getfullargspec(self.call)
if specs.varkw is not None:
return self.call(inputs, **kwargs)
kw = {i: kwargs[i] for i in specs.args[2:] if i in kwargs}
return self.call(inputs, **kw)
def call(self, inputs, **kwargs):
raise NotImplementedError
class Dense(Layer):
def __init__(self,
units,
activation=None,
use_bias=True,
kernel_initializer='glorot_uniform',
bias_initializer='zeros',
**kwargs):
super(Dense, self).__init__()
self.units = int(units)
self.activation = parse_activation(activation, self)
self.use_bias = use_bias
self.kernel_initializer = parse_initializer(kernel_initializer, self)
self.bias_initializer = parse_initializer(bias_initializer, self)
def build(self, input_shape):
D_in = input_shape[-1]
D_out = self.units
self._linear = nn.Linear(in_features=D_in,
out_features=D_out,
bias=self.use_bias)
self.kernel_initializer(self._linear.weight)
if self.use_bias:
self.bias_initializer(self._linear.bias)
return super(Dense, self).build(input_shape)
def call(self, inputs, training=None):
y = self._linear(inputs)
return self.activation(y)
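# Illustrative sketch (not part of this module): the layer builds its weights
# on the first call, mirroring the Keras-style deferred build.
#   >>> layer = Dense(32, activation='relu')
#   >>> y = layer(torch.randn(8, 64))   # builds nn.Linear(64, 32); y.shape == (8, 32)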
# ===========================================================================
# Convolution
# ===========================================================================
class Conv(Layer):
def __init__(self,
rank,
filters,
kernel_size,
strides=1,
padding='valid',
data_format=None,
dilation_rate=1,
activation=None,
use_bias=True,
kernel_initializer='glorot_uniform',
bias_initializer='zeros',
name=None,
**kwargs):
super(Conv, self).__init__()
self.rank = rank
self.filters = filters
self.kernel_size = conv_utils.normalize_tuple(kernel_size, rank,
'kernel_size')
self.strides = conv_utils.normalize_tuple(strides, rank, 'strides')
self.padding = conv_utils.normalize_padding(padding)
if (self.padding == 'causal' and not isinstance(self, (Conv1D,))):
      raise ValueError('Causal padding is only supported for `Conv1D` '
                       'and `SeparableConv1D`.')
self.data_format = conv_utils.normalize_data_format(data_format)
self.dilation_rate = conv_utils.normalize_tuple(dilation_rate, rank,
'dilation_rate')
self.activation = parse_activation(activation, self)
self.use_bias = use_bias
self.kernel_initializer = parse_initializer(kernel_initializer, self)
self.bias_initializer = parse_initializer(bias_initializer, self)
def build(self, input_shape):
in_channels = (input_shape[-1]
if self.data_format == 'channels_last' else input_shape[1])
spatial_shape = (input_shape[2:] if self.data_format == 'channels_first'
else input_shape[1:-1])
# 1D `(padW,)`
# 2D `(padH, padW)`
# 3D `(padT, padH, padW)`
if self.padding == 'valid':
padding = 0
padding_mode = 'zeros'
elif self.padding == 'same':
padding = [i // 2 for i in self.kernel_size]
padding_mode = 'zeros'
elif self.padding == 'causal' and self.rank == 1:
padding = 0
padding_mode = 'zeros'
else:
raise NotImplementedError("No support for padding='%s' and rank=%d" %
(self.padding, self.rank))
if self.rank == 1:
self._conv = torch.nn.Conv1d(in_channels=in_channels,
out_channels=self.filters,
kernel_size=self.kernel_size,
stride=self.strides,
padding=padding,
dilation=self.dilation_rate,
groups=1,
bias=self.use_bias,
padding_mode=padding_mode)
elif self.rank == 2:
self._conv = torch.nn.Conv2d(in_channels=in_channels,
out_channels=self.filters,
kernel_size=self.kernel_size,
stride=self.strides,
padding=padding,
dilation=self.dilation_rate,
groups=1,
bias=self.use_bias,
padding_mode=padding_mode)
elif self.rank == 3:
self._conv = torch.nn.Conv3d(in_channels=in_channels,
out_channels=self.filters,
kernel_size=self.kernel_size,
stride=self.strides,
padding=padding,
dilation=self.dilation_rate,
groups=1,
                                   bias=self.use_bias,
padding_mode=padding_mode)
else:
raise NotImplementedError("No support for rank=%d" % self.rank)
self.kernel_initializer(self._conv.weight)
if self.use_bias:
self.bias_initializer(self._conv.bias)
return super(Conv, self).build(input_shape)
def call(self, inputs, training=None):
# causal padding for temporal signal
if self.padding == 'causal' and self.rank == 1:
inputs = functional.pad(inputs,
self._compute_causal_padding(),
mode='constant',
value=0)
# pytorch only support channels_first
if self.data_format == 'channels_last':
inputs = inputs.transpose(1, -1)
# applying the convolution
y = self._conv(inputs)
if self.data_format == 'channels_last':
y = y.transpose(1, -1)
return y
def _compute_causal_padding(self):
"""Calculates padding for 'causal' option for 1-d conv layers.
@Original code: tensorflow.keras
"""
left_pad = self.dilation_rate[0] * (self.kernel_size[0] - 1)
if self.data_format == 'channels_last':
causal_padding = [0, 0, left_pad, 0, 0, 0]
else:
causal_padding = [0, 0, 0, 0, left_pad, 0]
return causal_padding
class Conv1D(Conv):
def __init__(self,
filters,
kernel_size,
strides=1,
padding='valid',
data_format='channels_last',
dilation_rate=1,
activation=None,
use_bias=True,
kernel_initializer='glorot_uniform',
bias_initializer='zeros',
**kwargs):
super(Conv1D, self).__init__(rank=1,
filters=filters,
kernel_size=kernel_size,
strides=strides,
padding=padding,
data_format=data_format,
dilation_rate=dilation_rate,
activation=activation,
use_bias=use_bias,
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer,
**kwargs)
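# Illustrative sketch (not part of this module): a channels-last 1-D convolution
# over a hypothetical (batch, time, channels) input, as in Keras.
#   >>> conv = Conv1D(filters=16, kernel_size=3, padding='same')
#   >>> y = conv(torch.randn(4, 100, 8))   # y.shape == (4, 100, 16)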
class ConvCausal(Conv1D):
def __init__(self,
filters,
kernel_size,
strides=1,
data_format='channels_last',
dilation_rate=1,
activation=None,
use_bias=True,
kernel_initializer='glorot_uniform',
bias_initializer='zeros',
**kwargs):
super(ConvCausal, self).__init__(filters=filters,
kernel_size=kernel_size,
strides=strides,
padding='causal',
data_format=data_format,
dilation_rate=dilation_rate,
activation=activation,
use_bias=use_bias,
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer,
**kwargs)
class Conv2D(Conv):
def __init__(self,
filters,
kernel_size,
strides=(1, 1),
padding='valid',
data_format=None,
dilation_rate=(1, 1),
activation=None,
use_bias=True,
kernel_initializer='glorot_uniform',
bias_initializer='zeros',
**kwargs):
super(Conv2D, self).__init__(rank=2,
filters=filters,
kernel_size=kernel_size,
strides=strides,
padding=padding,
data_format=data_format,
dilation_rate=dilation_rate,
activation=activation,
use_bias=use_bias,
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer,
**kwargs)
class Conv3D(Conv):
def __init__(self,
filters,
kernel_size,
strides=(1, 1, 1),
padding='valid',
data_format=None,
dilation_rate=(1, 1, 1),
activation=None,
use_bias=True,
kernel_initializer='glorot_uniform',
bias_initializer='zeros',
**kwargs):
super(Conv3D, self).__init__(rank=3,
filters=filters,
kernel_size=kernel_size,
strides=strides,
padding=padding,
data_format=data_format,
dilation_rate=dilation_rate,
activation=activation,
use_bias=use_bias,
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer,
**kwargs)
# ===========================================================================
# Recurrent neural network
# ===========================================================================
class _RNNLayer(Layer):
def __init__(self, kernel_initializer, recurrent_initializer,
bias_initializer, return_sequences, return_state, go_backwards,
stateful, **kwargs):
super(_RNNLayer, self).__init__(**kwargs)
self.return_sequences = return_sequences
self.return_state = return_state
self.go_backwards = go_backwards
self.stateful = stateful
if stateful:
raise NotImplementedError(
"pytorch currently does not support stateful RNN")
self.kernel_initializer = parse_initializer(kernel_initializer, self)
self.recurrent_initializer = parse_initializer(recurrent_initializer, self)
self.bias_initializer = parse_initializer(bias_initializer, self)
def build(self, input_shape):
if not hasattr(self, '_rnn'):
raise RuntimeError(
"instance of pytorch RNN must be create and assigned to attribute "
"name '_rnn' during `build`.")
for layer_idx in range(self._rnn.num_layers):
self.kernel_initializer(getattr(self._rnn, 'weight_ih_l%d' % layer_idx))
self.recurrent_initializer(getattr(self._rnn,
'weight_hh_l%d' % layer_idx))
b_ih = getattr(self._rnn, 'bias_ih_l%d' % layer_idx)
b_hh = getattr(self._rnn, 'bias_hh_l%d' % layer_idx)
self.bias_initializer(b_ih)
self.bias_initializer(b_hh)
if getattr(self, 'unit_forget_bias', False):
# b_ii|b_if|b_ig|b_io
b_ih[self.units:self.units * 2] = 1
# b_hi|b_hf|b_hg|b_ho
b_hh[self.units:self.units * 2] = 1
return super(_RNNLayer, self).build(input_shape)
def call(self, inputs):
if self.go_backwards:
inputs = inputs.flip(1)
outputs, states = self._rnn(inputs)
if not isinstance(states, (tuple, list)):
states = (states,)
if not self.return_sequences:
outputs = outputs[:, -1]
if not self.return_state:
return outputs
return [outputs] + list(states)
class SimpleRNN(_RNNLayer):
def __init__(self,
units,
activation='tanh',
use_bias=True,
kernel_initializer='glorot_uniform',
recurrent_initializer='orthogonal',
bias_initializer='zeros',
dropout=0.,
return_sequences=False,
return_state=False,
go_backwards=False,
stateful=False,
num_layers=1,
bidirectional=False,
**kwargs):
super(SimpleRNN, self).__init__(kernel_initializer=kernel_initializer,
recurrent_initializer=recurrent_initializer,
bias_initializer=bias_initializer,
return_sequences=return_sequences,
return_state=return_state,
go_backwards=go_backwards,
stateful=stateful,
**kwargs)
self.units = int(units)
self.activation = str(activation)
self.use_bias = use_bias
self.dropout = dropout
self.num_layers = num_layers
self.bidirectional = bidirectional
def build(self, input_shape):
input_size = input_shape[-1]
self._rnn = nn.RNN(input_size=input_size,
hidden_size=self.units,
num_layers=self.num_layers,
nonlinearity=self.activation,
bias=self.use_bias,
batch_first=True,
dropout=self.dropout,
bidirectional=self.bidirectional)
return super(SimpleRNN, self).build(input_shape)
class LSTM(_RNNLayer):
def __init__(self,
units,
use_bias=True,
kernel_initializer='glorot_uniform',
recurrent_initializer='orthogonal',
bias_initializer='zeros',
unit_forget_bias=True,
dropout=0.,
return_sequences=False,
return_state=False,
go_backwards=False,
stateful=False,
num_layers=1,
bidirectional=False,
**kwargs):
super(LSTM, self).__init__(kernel_initializer=kernel_initializer,
recurrent_initializer=recurrent_initializer,
bias_initializer=bias_initializer,
return_sequences=return_sequences,
return_state=return_state,
go_backwards=go_backwards,
stateful=stateful,
**kwargs)
self.units = int(units)
self.use_bias = use_bias
self.dropout = dropout
self.unit_forget_bias = unit_forget_bias
self.num_layers = num_layers
self.bidirectional = bidirectional
def build(self, input_shape):
input_size = input_shape[-1]
self._rnn = nn.LSTM(input_size=input_size,
hidden_size=self.units,
num_layers=self.num_layers,
bias=self.use_bias,
batch_first=True,
dropout=self.dropout,
bidirectional=self.bidirectional)
return super(LSTM, self).build(input_shape)
class GRU(_RNNLayer):
def __init__(self,
units,
use_bias=True,
kernel_initializer='glorot_uniform',
recurrent_initializer='orthogonal',
bias_initializer='zeros',
dropout=0.,
return_sequences=False,
return_state=False,
go_backwards=False,
stateful=False,
num_layers=1,
bidirectional=False,
**kwargs):
super(GRU, self).__init__(kernel_initializer=kernel_initializer,
recurrent_initializer=recurrent_initializer,
bias_initializer=bias_initializer,
return_sequences=return_sequences,
return_state=return_state,
go_backwards=go_backwards,
stateful=stateful,
**kwargs)
self.units = int(units)
self.use_bias = use_bias
self.dropout = dropout
self.num_layers = num_layers
self.bidirectional = bidirectional
def build(self, input_shape):
input_size = input_shape[-1]
self._rnn = nn.GRU(input_size=input_size,
hidden_size=self.units,
num_layers=self.num_layers,
bias=self.use_bias,
batch_first=True,
dropout=self.dropout,
bidirectional=self.bidirectional)
return super(GRU, self).build(input_shape)
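# Illustrative sketch (not part of this module): a batch-first recurrent layer
# with Keras-like return flags, applied to a hypothetical (batch, time, feature)
# input.
#   >>> gru = GRU(units=64, return_sequences=True)
#   >>> y = gru(torch.randn(8, 50, 20))   # y.shape == (8, 50, 64)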
| {
"repo_name": "imito/odin",
"path": "odin/networks_torch/keras_torch.py",
"copies": "1",
"size": "19521",
"license": "mit",
"hash": -62344879765102130,
"line_mean": 37.0526315789,
"line_max": 80,
"alpha_frac": 0.5001792941,
"autogenerated": false,
"ratio": 4.622543215723419,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00365496458795037,
"num_lines": 513
} |
from __future__ import absolute_import, division, print_function
import inspect
class BasicRegistrationFactory(object):
"""
Generalized registerable factory type.
Widgets (classes) can be registered with an instance of this class.
Arguments to the factory's `__call__` method are then passed to a function
    specified for the registered widget, which validates the input and returns
    an instance of the class that best matches the inputs.
Attributes
----------
registry : dict
Dictionary mapping classes (key) to function (value) which validates
input.
default_widget_type : type
Class of the default widget. Defaults to None.
validation_functions : list of strings
List of function names that are valid validation functions.
Parameters
----------
default_widget_type : type, optional
additional_validation_functions : list of strings, optional
List of strings corresponding to additional validation function names.
Notes
-----
* A valid validation function must be a classmethod of the registered widget
and it must return True or False.
"""
def __init__(self, default_widget_type=None, additional_validation_functions=[]):
self.registry = dict()
self.default_widget_type = default_widget_type
self.validation_functions = ['_factory_validation_function'] + additional_validation_functions
def __call__(self, *args, **kwargs):
""" Method for running the factory.
Arguments args and kwargs are passed through to the validation
function and to the constructor for the final type.
"""
# Any preprocessing and massaging of inputs can happen here
return self._check_registered_widget(*args, **kwargs)
def _check_registered_widget(self, *args, **kwargs):
""" Implementation of a basic check to see if arguments match a widget."""
candidate_widget_types = list()
for key in self.registry:
# Call the registered validation function for each registered class
if self.registry[key](*args, **kwargs):
candidate_widget_types.append(key)
n_matches = len(candidate_widget_types)
if n_matches == 0:
if self.default_widget_type is None:
raise NoMatchError("No types match specified arguments and no default is set.")
else:
candidate_widget_types = [self.default_widget_type]
elif n_matches > 1:
raise MultipleMatchError("Too many candidate types identified ({0}). Specify enough keywords to guarantee unique type identification.".format(n_matches))
# Only one is found
WidgetType = candidate_widget_types[0]
return WidgetType(*args, **kwargs)
def register(self, WidgetType, validation_function=None, is_default=False):
""" Register a widget with the factory.
        If `validation_function` is not specified, `WidgetType` is tested for
        the existence of any function named in the list `self.validation_functions`,
        each of which must be a callable classmethod attribute.
Parameters
----------
WidgetType : type
Widget to register.
validation_function : function, optional
Function to validate against. Defaults to None, which indicates
that a classmethod in validation_functions is used.
is_default : bool, optional
Sets WidgetType to be the default widget.
"""
if is_default:
self.default_widget_type = WidgetType
elif validation_function is not None:
if not callable(validation_function):
raise AttributeError("Keyword argument 'validation_function' must be callable.")
self.registry[WidgetType] = validation_function
else:
found = False
for vfunc_str in self.validation_functions:
if hasattr(WidgetType, vfunc_str):
vfunc = getattr(WidgetType, vfunc_str)
# check if classmethod: stackoverflow #19227724
_classmethod = inspect.ismethod(vfunc) and vfunc.__self__ is WidgetType
if _classmethod:
self.registry[WidgetType] = vfunc
found = True
break
else:
raise ValidationFunctionError("{0}.{1} must be a classmethod.".format(WidgetType.__name__, vfunc_str))
if not found:
raise ValidationFunctionError("No proper validation function for class {0} found.".format(WidgetType.__name__))
def unregister(self, WidgetType):
""" Remove a widget from the factory's registry."""
self.registry.pop(WidgetType)
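# Illustrative sketch (not part of this module): `GenericMap` below is a
# hypothetical widget registered through its own classmethod validator.
#   >>> class GenericMap(object):
#   ...     def __init__(self, data, **kwargs):
#   ...         self.data = data
#   ...     @classmethod
#   ...     def _factory_validation_function(cls, data, **kwargs):
#   ...         return hasattr(data, 'shape')
#   >>> Map = BasicRegistrationFactory()
#   >>> Map.register(GenericMap)
#   >>> import numpy as np
#   >>> m = Map(np.zeros((4, 4)))   # dispatches to GenericMap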
class NoMatchError(Exception):
"""Exception for when no candidate class is found."""
class MultipleMatchError(Exception):
"""Exception for when too many candidate classes are found."""
class ValidationFunctionError(AttributeError):
    """Exception for when no valid validation function is found."""
| {
"repo_name": "Alex-Ian-Hamilton/sunpy",
"path": "sunpy/util/datatype_factory_base.py",
"copies": "1",
"size": "5197",
"license": "bsd-2-clause",
"hash": -3293656992309893000,
"line_mean": 33.417218543,
"line_max": 166,
"alpha_frac": 0.6372907447,
"autogenerated": false,
"ratio": 5.0164092664092665,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6153700011109267,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import inspect
import numpy as np
try:
import scipy
import scipy.fftpack
except ImportError:
scipy = None
chunk_error = ("Dask array only supports taking an FFT along an axis that \n"
"has a single chunk. An FFT operation was tried on axis %s \n"
"which has chunks %s. To change the array's chunks use "
"dask.Array.rechunk.")
fft_preamble = """
Wrapping of %s
    The axis along which the FFT is applied must have only one chunk. To change
the array's chunking use dask.Array.rechunk.
The %s docstring follows below:
"""
def _fft_out_chunks(a, s, axes):
""" For computing the output chunks of [i]fft*"""
if s is None:
return a.chunks
chunks = list(a.chunks)
for i, axis in enumerate(axes):
chunks[axis] = (s[i],)
return chunks
def _rfft_out_chunks(a, s, axes):
""" For computing the output chunks of rfft*"""
if s is None:
s = [a.chunks[axis][0] for axis in axes]
s = list(s)
s[-1] = s[-1] // 2 + 1
chunks = list(a.chunks)
for i, axis in enumerate(axes):
chunks[axis] = (s[i],)
return chunks
def _irfft_out_chunks(a, s, axes):
""" For computing the output chunks of irfft*"""
if s is None:
s = [a.chunks[axis][0] for axis in axes]
s[-1] = 2 * (s[-1] - 1)
chunks = list(a.chunks)
for i, axis in enumerate(axes):
chunks[axis] = (s[i],)
return chunks
def _hfft_out_chunks(a, s, axes):
assert len(axes) == 1
axis = axes[0]
if s is None:
s = [2 * (a.chunks[axis][0] - 1)]
n = s[0]
chunks = list(a.chunks)
chunks[axis] = (n,)
return chunks
def _ihfft_out_chunks(a, s, axes):
assert len(axes) == 1
axis = axes[0]
if s is None:
s = [a.chunks[axis][0]]
else:
assert len(s) == 1
n = s[0]
chunks = list(a.chunks)
if n % 2 == 0:
m = (n // 2) + 1
else:
m = (n + 1) // 2
chunks[axis] = (m,)
return chunks
_out_chunk_fns = {'fft': _fft_out_chunks,
'ifft': _fft_out_chunks,
'rfft': _rfft_out_chunks,
'irfft': _irfft_out_chunks,
'hfft': _hfft_out_chunks,
'ihfft': _ihfft_out_chunks}
def fft_wrap(fft_func, kind=None, dtype=None):
""" Wrap 1D complex FFT functions
Takes a function that behaves like ``numpy.fft`` functions and
    a specified ``kind``, named after the corresponding function in the
    ``numpy.fft`` API, to match it to.
Supported kinds include:
* fft
* ifft
* rfft
* irfft
* hfft
* ihfft
Examples
--------
>>> parallel_fft = fft_wrap(np.fft.fft)
>>> parallel_ifft = fft_wrap(np.fft.ifft)
"""
if scipy is not None:
if fft_func is scipy.fftpack.rfft:
raise ValueError("SciPy's `rfft` doesn't match the NumPy API.")
elif fft_func is scipy.fftpack.irfft:
raise ValueError("SciPy's `irfft` doesn't match the NumPy API.")
if kind is None:
kind = fft_func.__name__
try:
out_chunk_fn = _out_chunk_fns[kind.rstrip("2n")]
except KeyError:
raise ValueError("Given unknown `kind` %s." % kind)
def func(a, s=None, axes=None):
if axes is None:
if kind.endswith('2'):
axes = (-2, -1)
elif kind.endswith('n'):
if s is None:
axes = tuple(range(a.ndim))
else:
axes = tuple(range(len(s)))
else:
axes = (-1,)
else:
if len(set(axes)) < len(axes):
raise ValueError("Duplicate axes not allowed.")
_dtype = dtype
if _dtype is None:
_dtype = fft_func(np.ones(len(axes) * (8,),
dtype=a.dtype)).dtype
for each_axis in axes:
if len(a.chunks[each_axis]) != 1:
raise ValueError(chunk_error % (each_axis, a.chunks[each_axis]))
chunks = out_chunk_fn(a, s, axes)
args = (s, axes)
if kind.endswith('fft'):
axis = None if axes is None else axes[0]
n = None if s is None else s[0]
args = (n, axis)
return a.map_blocks(fft_func, *args, dtype=_dtype,
chunks=chunks)
if kind.endswith('fft'):
_func = func
def func(a, n=None, axis=None):
s = None
if n is not None:
s = (n,)
axes = None
if axis is not None:
axes = (axis,)
return _func(a, s, axes)
func_mod = inspect.getmodule(fft_func)
func_name = fft_func.__name__
func_fullname = func_mod.__name__ + "." + func_name
if fft_func.__doc__ is not None:
func.__doc__ = (fft_preamble % (2 * (func_fullname,)))
func.__doc__ += fft_func.__doc__
func.__name__ = func_name
return func
fft = fft_wrap(np.fft.fft, dtype=np.complex_)
fft2 = fft_wrap(np.fft.fft2, dtype=np.complex_)
fftn = fft_wrap(np.fft.fftn, dtype=np.complex_)
ifft = fft_wrap(np.fft.ifft, dtype=np.complex_)
ifft2 = fft_wrap(np.fft.ifft2, dtype=np.complex_)
ifftn = fft_wrap(np.fft.ifftn, dtype=np.complex_)
rfft = fft_wrap(np.fft.rfft, dtype=np.complex_)
rfft2 = fft_wrap(np.fft.rfft2, dtype=np.complex_)
rfftn = fft_wrap(np.fft.rfftn, dtype=np.complex_)
irfft = fft_wrap(np.fft.irfft, dtype=np.float_)
irfft2 = fft_wrap(np.fft.irfft2, dtype=np.float_)
irfftn = fft_wrap(np.fft.irfftn, dtype=np.float_)
hfft = fft_wrap(np.fft.hfft, dtype=np.float_)
ihfft = fft_wrap(np.fft.ihfft, dtype=np.complex_)
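# --- illustrative sketch (not part of the original module) ---
# The single-chunk requirement enforced above, assuming dask.array is
# installed for building a chunked array.
def _demo_single_chunk_requirement():
    import dask.array as da

    x = da.ones((8, 8), chunks=(8, 4))        # axis 0: one chunk, axis 1: two chunks
    fft(x, axis=0).compute()                  # fine, the FFT axis is a single chunk
    try:
        fft(x, axis=1)                        # rejected with chunk_error above
    except ValueError as exc:
        print(exc)
    fft(x.rechunk({1: 8}), axis=1).compute()  # collapse axis 1 first, then it works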
| {
"repo_name": "cpcloud/dask",
"path": "dask/array/fft.py",
"copies": "1",
"size": "5765",
"license": "bsd-3-clause",
"hash": 8788931798378065000,
"line_mean": 26.0657276995,
"line_max": 80,
"alpha_frac": 0.5396357329,
"autogenerated": false,
"ratio": 3.2811610700056915,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9320506998234039,
"avg_score": 0.0000579609343302614,
"num_lines": 213
} |
from __future__ import absolute_import, division, print_function
import io
import json
import os.path
from collections import defaultdict
from functools import wraps
from glob import glob
from itertools import chain
def gen_dict(func):
@wraps(func)
def decorated(*args, **kwargs):
return dict(func(*args, **kwargs))
return decorated
def ls_bench_storage(bench_storage, modes):
# NNNN just reflects the pytest-benchmark result files naming scheme:
# NNNN_commit*.json, that is, 0001_commit*.json, 0002_commit*.json, ...
nnnn_files_map = defaultdict(dict) # {'NNNN': {'mode': 'filename'}}
garbage_files = set()
for mode in modes:
for filename in glob(os.path.join(bench_storage, mode,
'[0-9][0-9][0-9][0-9]_*.json')):
mode_dirname, basename = os.path.split(filename)
nnnn = os.path.splitext(basename)[0][:12] # NNNN_commit
mode_nnnn_files = glob(os.path.join(mode_dirname, nnnn + '*.json'))
if len(mode_nnnn_files) != 1:
garbage_files.update(mode_nnnn_files)
else:
nnnn_files_map[nnnn][mode] = filename
benchmark_files = defaultdict(dict) # {'mode': {'NNNN': 'filename'}}
for nnnn, nnnn_files in nnnn_files_map.items():
if len(nnnn_files) != len(modes):
# for gf in nnnn_files.values():
# print('>>>', gf)
garbage_files.update(nnnn_files.values())
else:
for mode, filename in nnnn_files.items():
benchmark_files[mode][nnnn] = filename
return sorted(nnnn_files_map), dict(benchmark_files), sorted(garbage_files)
@gen_dict # {'mode': {'NNNN': benchmark, ...}}
def load_raw_benchmarks(benchmark_files):
for mode, filemap in benchmark_files.items():
trialmap = {}
for trial_name, filename in filemap.items():
with io.open(filename, 'rU') as fh:
trialmap[trial_name] = json.load(fh)
yield mode, trialmap
@gen_dict # {'mode': [{'test': min}...]}
def prepare_benchmarks(raw_benchmarks, trial_names):
for mode, trialmap in raw_benchmarks.items():
envlist = []
for trial_name in trial_names:
trial = trialmap.get(trial_name, {}).get('benchmarks', [])
benchenv = dict((bench['fullname'], bench['stats'].get('min'))
for bench in trial)
envlist.append(benchenv)
yield mode, envlist
def load_benchmarks(bench_storage, modes):
trial_names, benchmark_files, _ = ls_bench_storage(bench_storage, modes)
return load_benchmarks_from_files(benchmark_files, trial_names)
def load_benchmarks_from_files(benchmark_files, trial_names):
raw_benchmarks = load_raw_benchmarks(benchmark_files)
benchmarks = prepare_benchmarks(raw_benchmarks, trial_names)
return benchmarks
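# --- illustrative sketch (not part of the original module) ---
# Builds the directory layout ls_bench_storage() expects (one sub-directory per
# mode, files named NNNN_commit*.json) and shows the trial-name/file grouping.
def _demo_ls_bench_storage():
    import os
    import tempfile

    root = tempfile.mkdtemp()
    modes = ['py27', 'py35']
    for mode in modes:
        os.makedirs(os.path.join(root, mode))
        for name in ('0001_commit1.json', '0002_commit2.json'):
            with open(os.path.join(root, mode, name), 'w') as fh:
                fh.write('{"benchmarks": []}')
    trial_names, benchmark_files, garbage = ls_bench_storage(root, modes)
    print(trial_names)              # ['0001_commit1', '0002_commit2']
    print(sorted(benchmark_files))  # ['py27', 'py35']
    print(garbage)                  # []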
| {
"repo_name": "eisensheng/pytest-catchlog",
"path": "tests/perf/data.py",
"copies": "1",
"size": "2916",
"license": "mit",
"hash": 221081519422123520,
"line_mean": 33.3058823529,
"line_max": 79,
"alpha_frac": 0.6124828532,
"autogenerated": false,
"ratio": 3.5823095823095823,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9694792435509583,
"avg_score": 0,
"num_lines": 85
} |
from __future__ import absolute_import, division, print_function
import io
import logging
import os
import re
from glob import has_magic
# for backwards compat, we export cache things from here too
from .caching import ( # noqa: F401
BaseCache,
BlockCache,
BytesCache,
MMapCache,
ReadAheadCache,
caches,
)
from .compression import compr
from .registry import filesystem, get_filesystem_class
from .utils import (
build_name_function,
infer_compression,
stringify_path,
update_storage_options,
)
logger = logging.getLogger("fsspec")
class OpenFile(object):
"""
File-like object to be used in a context
Can layer (buffered) text-mode and compression over any file-system, which
are typically binary-only.
These instances are safe to serialize, as the low-level file object
is not created until invoked using `with`.
Parameters
----------
fs: FileSystem
The file system to use for opening the file. Should match the interface
of ``dask.bytes.local.LocalFileSystem``.
path: str
Location to open
mode: str like 'rb', optional
Mode of the opened file
compression: str or None, optional
Compression to apply
encoding: str or None, optional
The encoding to use if opened in text mode.
errors: str or None, optional
How to handle encoding errors if opened in text mode.
newline: None or str
Passed to TextIOWrapper in text mode, how to handle line endings.
"""
def __init__(
self,
fs,
path,
mode="rb",
compression=None,
encoding=None,
errors=None,
newline=None,
):
self.fs = fs
self.path = path
self.mode = mode
self.compression = get_compression(path, compression)
self.encoding = encoding
self.errors = errors
self.newline = newline
self.fobjects = []
def __reduce__(self):
return (
OpenFile,
(
self.fs,
self.path,
self.mode,
self.compression,
self.encoding,
self.errors,
self.newline,
),
)
def __repr__(self):
return "<OpenFile '{}'>".format(self.path)
def __fspath__(self):
# may raise if cannot be resolved to local file
return self.open().__fspath__()
def __enter__(self):
mode = self.mode.replace("t", "").replace("b", "") + "b"
f = self.fs.open(self.path, mode=mode)
self.fobjects = [f]
if self.compression is not None:
compress = compr[self.compression]
f = compress(f, mode=mode[0])
self.fobjects.append(f)
if "b" not in self.mode:
# assume, for example, that 'r' is equivalent to 'rt' as in builtin
f = io.TextIOWrapper(
f, encoding=self.encoding, errors=self.errors, newline=self.newline
)
self.fobjects.append(f)
return self.fobjects[-1]
def __exit__(self, *args):
self.close()
def __del__(self):
if hasattr(self, "fobjects"):
self.fobjects.clear() # may cause cleanup of objects and close files
def open(self):
"""Materialise this as a real open file without context
The file should be explicitly closed to avoid enclosed file
instances persisting. This code-path monkey-patches the file-like
objects, so they can close even if the parent OpenFile object has already
been deleted; but a with-context is better style.
"""
out = self.__enter__()
closer = out.close
fobjects = self.fobjects.copy()[:-1]
mode = self.mode
def close():
            # this closure holds no reference to the OpenFile itself, so it can
            # still run after the parent OpenFile has been deleted
closer() # original close bound method of the final file-like
_close(fobjects, mode) # call close on other dependent file-likes
out.close = close
return out
def close(self):
"""Close all encapsulated file objects"""
_close(self.fobjects, self.mode)
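# --- illustrative sketch (not part of the original module) ---
# Layering text mode over a binary local filesystem with OpenFile; assumes a
# writable POSIX-style /tmp and that LocalFileSystem is importable as below.
def _demo_openfile():
    from fsspec.implementations.local import LocalFileSystem

    of = OpenFile(LocalFileSystem(), "/tmp/openfile_demo.txt",
                  mode="wt", encoding="utf8")
    with of as f:           # the low-level file object is only created here
        f.write("hello")    # text is encoded, then written through the binary layer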
class OpenFiles(list):
"""List of OpenFile instances
Can be used in a single context, which opens and closes all of the
contained files. Normal list access to get the elements works as
normal.
A special case is made for caching filesystems - the files will
be down/uploaded together at the start or end of the context, and
this may happen concurrently, if the target filesystem supports it.
"""
def __init__(self, *args, mode="rb", fs=None):
self.mode = mode
self.fs = fs
self.files = []
super().__init__(*args)
def __enter__(self):
if self.fs is None:
raise ValueError("Context has already been used")
fs = self.fs
while True:
if hasattr(fs, "open_many"):
# check for concurrent cache download; or set up for upload
self.files = fs.open_many(self)
return self.files
if hasattr(fs, "fs") and fs.fs is not None:
fs = fs.fs
else:
break
return [s.__enter__() for s in self]
def __exit__(self, *args):
fs = self.fs
if "r" not in self.mode:
while True:
if hasattr(fs, "open_many"):
# check for concurrent cache upload
fs.commit_many(self.files)
self.files.clear()
return
if hasattr(fs, "fs") and fs.fs is not None:
fs = fs.fs
else:
break
[s.__exit__(*args) for s in self]
def __repr__(self):
return "<List of %s OpenFile instances>" % len(self)
def _close(fobjects, mode):
for f in reversed(fobjects):
if "r" not in mode and not f.closed:
f.flush()
f.close()
fobjects.clear()
def open_files(
urlpath,
mode="rb",
compression=None,
encoding="utf8",
errors=None,
name_function=None,
num=1,
protocol=None,
newline=None,
auto_mkdir=True,
expand=True,
**kwargs,
):
"""Given a path or paths, return a list of ``OpenFile`` objects.
For writing, a str path must contain the "*" character, which will be filled
in by increasing numbers, e.g., "part*" -> "part1", "part2" if num=2.
For either reading or writing, can instead provide explicit list of paths.
Parameters
----------
urlpath: string or list
Absolute or relative filepath(s). Prefix with a protocol like ``s3://``
to read from alternative filesystems. To read from multiple files you
can pass a globstring or a list of paths, with the caveat that they
must all have the same protocol.
mode: 'rb', 'wt', etc.
compression: string
Compression to use. See ``dask.bytes.compression.files`` for options.
encoding: str
For text mode only
errors: None or str
Passed to TextIOWrapper in text mode
name_function: function or None
if opening a set of files for writing, those files do not yet exist,
so we need to generate their names by formatting the urlpath for
each sequence number
num: int [1]
if writing mode, number of files we expect to create (passed to
        name_function)
protocol: str or None
If given, overrides the protocol found in the URL.
newline: bytes or None
Used for line terminator in text mode. If None, uses system default;
if blank, uses no translation.
auto_mkdir: bool (True)
If in write mode, this will ensure the target directory exists before
writing, by calling ``fs.mkdirs(exist_ok=True)``.
expand: bool
**kwargs: dict
Extra options that make sense to a particular storage connection, e.g.
host, port, username, password, etc.
Examples
--------
>>> files = open_files('2015-*-*.csv') # doctest: +SKIP
>>> files = open_files(
... 's3://bucket/2015-*-*.csv.gz', compression='gzip'
... ) # doctest: +SKIP
Returns
-------
An ``OpenFiles`` instance, which is a list of ``OpenFile`` objects that can
be used as a single context
"""
fs, fs_token, paths = get_fs_token_paths(
urlpath,
mode,
num=num,
name_function=name_function,
storage_options=kwargs,
protocol=protocol,
expand=expand,
)
if "r" not in mode and auto_mkdir:
parents = {fs._parent(path) for path in paths}
[fs.makedirs(parent, exist_ok=True) for parent in parents]
return OpenFiles(
[
OpenFile(
fs,
path,
mode=mode,
compression=compression,
encoding=encoding,
errors=errors,
newline=newline,
)
for path in paths
],
mode=mode,
fs=fs,
)
def _un_chain(path, kwargs):
if isinstance(path, (tuple, list)):
bits = [_un_chain(p, kwargs) for p in path]
out = []
for pbit in zip(*bits):
paths, protocols, kwargs = zip(*pbit)
if len(set(protocols)) > 1:
raise ValueError("Protocol mismatch in URL chain")
if len(set(paths)) == 1:
paths = paths[0]
else:
paths = list(paths)
out.append([paths, protocols[0], kwargs[0]])
return out
x = re.compile(".*[^a-z]+.*") # test for non protocol-like single word
bits = (
[p if "://" in p or x.match(p) else p + "://" for p in path.split("::")]
if "::" in path
else [path]
)
if len(bits) < 2:
return []
# [[url, protocol, kwargs], ...]
out = []
previous_bit = None
for bit in reversed(bits):
protocol = split_protocol(bit)[0] or "file"
cls = get_filesystem_class(protocol)
extra_kwargs = cls._get_kwargs_from_urls(bit)
kws = kwargs.get(protocol, {})
kw = dict(**extra_kwargs, **kws)
bit = cls._strip_protocol(bit)
if (
protocol in {"blockcache", "filecache", "simplecache"}
and "target_protocol" not in kw
):
bit = previous_bit
out.append((bit, protocol, kw))
previous_bit = bit
out = list(reversed(out))
return out
def url_to_fs(url, **kwargs):
"""Turn fully-qualified and potentially chained URL into filesystem instance"""
chain = _un_chain(url, kwargs)
if len(chain) > 1:
inkwargs = {}
# Reverse iterate the chain, creating a nested target_* structure
for i, ch in enumerate(reversed(chain)):
urls, protocol, kw = ch
if i == len(chain) - 1:
inkwargs = dict(**kw, **inkwargs)
continue
inkwargs["target_options"] = dict(**kw, **inkwargs)
inkwargs["target_protocol"] = protocol
inkwargs["fo"] = urls
urlpath, protocol, _ = chain[0]
fs = filesystem(protocol, **inkwargs)
else:
protocol = split_protocol(url)[0]
cls = get_filesystem_class(protocol)
options = cls._get_kwargs_from_urls(url)
update_storage_options(options, kwargs)
fs = cls(**options)
urlpath = fs._strip_protocol(url)
return fs, urlpath
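# --- illustrative sketch (not part of the original module) ---
# url_to_fs() on a plain local URL (POSIX paths assumed). A chained URL such
# as "simplecache::s3://bucket/key.csv" would instead build the outermost
# caching filesystem, with target_protocol/target_options filled in above.
def _demo_url_to_fs():
    fs, path = url_to_fs("file:///tmp/data.csv")
    print(type(fs).__name__, path)  # e.g. LocalFileSystem /tmp/data.csv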
def open(
urlpath,
mode="rb",
compression=None,
encoding="utf8",
errors=None,
protocol=None,
newline=None,
**kwargs,
):
"""Given a path or paths, return one ``OpenFile`` object.
Parameters
----------
urlpath: string or list
Absolute or relative filepath. Prefix with a protocol like ``s3://``
to read from alternative filesystems. Should not include glob
character(s).
mode: 'rb', 'wt', etc.
compression: string
Compression to use. See ``dask.bytes.compression.files`` for options.
encoding: str
For text mode only
errors: None or str
Passed to TextIOWrapper in text mode
protocol: str or None
If given, overrides the protocol found in the URL.
newline: bytes or None
Used for line terminator in text mode. If None, uses system default;
if blank, uses no translation.
**kwargs: dict
Extra options that make sense to a particular storage connection, e.g.
host, port, username, password, etc.
Examples
--------
>>> openfile = open('2015-01-01.csv') # doctest: +SKIP
>>> openfile = open(
... 's3://bucket/2015-01-01.csv.gz', compression='gzip'
... ) # doctest: +SKIP
>>> with openfile as f:
... df = pd.read_csv(f) # doctest: +SKIP
...
Returns
-------
``OpenFile`` object.
"""
return open_files(
urlpath=[urlpath],
mode=mode,
compression=compression,
encoding=encoding,
errors=errors,
protocol=protocol,
newline=newline,
expand=False,
**kwargs,
)[0]
def open_local(url, mode="rb", **storage_options):
"""Open file(s) which can be resolved to local
For files which either are local, or get downloaded upon open
(e.g., by file caching)
Parameters
----------
url: str or list(str)
mode: str
Must be read mode
storage_options:
passed on to FS for or used by open_files (e.g., compression)
"""
if "r" not in mode:
raise ValueError("Can only ensure local files when reading")
of = open_files(url, mode=mode, **storage_options)
if not getattr(of[0].fs, "local_file", False):
raise ValueError(
"open_local can only be used on a filesystem which"
" has attribute local_file=True"
)
with of as files:
paths = [f.name for f in files]
if isinstance(url, str) and not has_magic(url):
return paths[0]
return paths
def get_compression(urlpath, compression):
if compression == "infer":
compression = infer_compression(urlpath)
if compression is not None and compression not in compr:
raise ValueError("Compression type %s not supported" % compression)
return compression
def split_protocol(urlpath):
"""Return protocol, path pair"""
urlpath = stringify_path(urlpath)
if "://" in urlpath:
protocol, path = urlpath.split("://", 1)
if len(protocol) > 1:
# excludes Windows paths
return protocol, path
return None, urlpath
def strip_protocol(urlpath):
"""Return only path part of full URL, according to appropriate backend"""
protocol, _ = split_protocol(urlpath)
cls = get_filesystem_class(protocol)
return cls._strip_protocol(urlpath)
def expand_paths_if_needed(paths, mode, num, fs, name_function):
"""Expand paths if they have a ``*`` in them.
:param paths: list of paths
mode: str
Mode in which to open files.
num: int
If opening in writing mode, number of files we expect to create.
fs: filesystem object
name_function: callable
If opening in writing mode, this callable is used to generate path
names. Names are generated for each partition by
``urlpath.replace('*', name_function(partition_index))``.
:return: list of paths
"""
expanded_paths = []
paths = list(paths)
if "w" in mode and sum([1 for p in paths if "*" in p]) > 1:
raise ValueError("When writing data, only one filename mask can be specified.")
elif "w" in mode:
num = max(num, len(paths))
for curr_path in paths:
if "*" in curr_path:
if "w" in mode:
# expand using name_function
expanded_paths.extend(_expand_paths(curr_path, name_function, num))
else:
# expand using glob
expanded_paths.extend(fs.glob(curr_path))
else:
expanded_paths.append(curr_path)
    # if we generated more paths than asked for, trim the list
if "w" in mode and len(expanded_paths) > num:
expanded_paths = expanded_paths[:num]
return expanded_paths
def get_fs_token_paths(
urlpath,
mode="rb",
num=1,
name_function=None,
storage_options=None,
protocol=None,
expand=True,
):
"""Filesystem, deterministic token, and paths from a urlpath and options.
Parameters
----------
urlpath: string or iterable
Absolute or relative filepath, URL (may include protocols like
``s3://``), or globstring pointing to data.
mode: str, optional
Mode in which to open files.
num: int, optional
If opening in writing mode, number of files we expect to create.
name_function: callable, optional
If opening in writing mode, this callable is used to generate path
names. Names are generated for each partition by
``urlpath.replace('*', name_function(partition_index))``.
storage_options: dict, optional
Additional keywords to pass to the filesystem class.
protocol: str or None
To override the protocol specifier in the URL
expand: bool
Expand string paths for writing, assuming the path is a directory
"""
if isinstance(urlpath, (list, tuple, set)):
if not urlpath:
raise ValueError("empty urlpath sequence")
urlpath = [stringify_path(u) for u in urlpath]
else:
urlpath = stringify_path(urlpath)
chain = _un_chain(urlpath, storage_options or {})
if len(chain) > 1:
inkwargs = {}
# Reverse iterate the chain, creating a nested target_* structure
for i, ch in enumerate(reversed(chain)):
urls, nested_protocol, kw = ch
if i == len(chain) - 1:
inkwargs = dict(**kw, **inkwargs)
continue
inkwargs["target_options"] = dict(**kw, **inkwargs)
inkwargs["target_protocol"] = nested_protocol
inkwargs["fo"] = urls
paths, protocol, _ = chain[0]
fs = filesystem(protocol, **inkwargs)
if isinstance(paths, (list, tuple, set)):
paths = [fs._strip_protocol(u) for u in paths]
else:
paths = fs._strip_protocol(paths)
else:
if isinstance(urlpath, (list, tuple, set)):
protocols, paths = zip(*map(split_protocol, urlpath))
if protocol is None:
protocol = protocols[0]
if not all(p == protocol for p in protocols):
raise ValueError(
"When specifying a list of paths, all paths must "
"share the same protocol"
)
cls = get_filesystem_class(protocol)
optionss = list(map(cls._get_kwargs_from_urls, urlpath))
paths = [cls._strip_protocol(u) for u in urlpath]
options = optionss[0]
if not all(o == options for o in optionss):
raise ValueError(
"When specifying a list of paths, all paths must "
"share the same file-system options"
)
update_storage_options(options, storage_options)
fs = cls(**options)
else:
protocols = split_protocol(urlpath)[0]
protocol = protocol or protocols
cls = get_filesystem_class(protocol)
options = cls._get_kwargs_from_urls(urlpath)
paths = cls._strip_protocol(urlpath)
update_storage_options(options, storage_options)
fs = cls(**options)
if isinstance(paths, (list, tuple, set)):
paths = expand_paths_if_needed(paths, mode, num, fs, name_function)
else:
if "w" in mode and expand:
paths = _expand_paths(paths, name_function, num)
elif "*" in paths:
paths = [f for f in sorted(fs.glob(paths)) if not fs.isdir(f)]
else:
paths = [paths]
return fs, fs._fs_token, paths
def _expand_paths(path, name_function, num):
if isinstance(path, str):
if path.count("*") > 1:
raise ValueError("Output path spec must contain exactly one '*'.")
elif "*" not in path:
path = os.path.join(path, "*.part")
if name_function is None:
name_function = build_name_function(num - 1)
paths = [path.replace("*", name_function(i)) for i in range(num)]
if paths != sorted(paths):
logger.warning(
"In order to preserve order between partitions"
" paths created with ``name_function`` should "
"sort to partition order"
)
elif isinstance(path, (tuple, list)):
assert len(path) == num
paths = list(path)
else:
raise ValueError(
"Path should be either\n"
"1. A list of paths: ['foo.json', 'bar.json', ...]\n"
"2. A directory: 'foo/\n"
"3. A path with a '*' in it: 'foo.*.json'"
)
return paths
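# --- illustrative sketch (not part of the original module) ---
# The two string forms accepted by _expand_paths above: a '*' template and a
# bare directory (which gets '*.part' appended); names come from
# build_name_function when no name_function is given.
def _demo_expand_paths():
    print(_expand_paths("out-*.csv", None, 3))  # e.g. ['out-0.csv', 'out-1.csv', 'out-2.csv']
    print(_expand_paths("outdir/", None, 2))    # e.g. ['outdir/0.part', 'outdir/1.part']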
| {
"repo_name": "intake/filesystem_spec",
"path": "fsspec/core.py",
"copies": "1",
"size": "21270",
"license": "bsd-3-clause",
"hash": 7342794919521575000,
"line_mean": 31.2761760243,
"line_max": 87,
"alpha_frac": 0.5740479549,
"autogenerated": false,
"ratio": 4.148624926857812,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00014591733536426776,
"num_lines": 659
} |
from __future__ import absolute_import, division, print_function
import io
import logging
import os.path
import sys
import pprint
from virtualenv.builders.base import BaseBuilder
from virtualenv._utils import copyfile
from virtualenv._utils import ensure_directory
SITE = b"""# -*- encoding: utf-8 -*-
import os
import os.path
import sys
try:
# Workaround for pypy losing its builtin _struct module. The bundled
# _struct.py is horribly broken, see:
# https://bitbucket.org/pypy/pypy/issue/1959/zipimport-issue-unorderable-types-str-int
import _struct
except ImportError:
pass
try:
# Workaround for pypy having a `builtin` symbol module. Don't laugh, it's not funny!
import symbol
except ImportError:
pass
# We want to stash the global site-packages here, this will be None if we're
# not adding them.
global_site_packages = __GLOBAL_SITE_PACKAGES__
# We want to make sure that our sys.prefix and sys.exec_prefix match the
# locations in our virtual environment.
# Note that case normalization is done here instead of the host interpreter
# to avoid differences - if any.
sys.prefix = os.path.normcase(__PREFIX__)
sys.exec_prefix = os.path.normcase(__EXEC_PREFIX__)
# We want to record what the "real/base" prefix is of the virtual environment.
# We store the variants `real_` prefix for compatibility with code that detects
# virtualenvs that way. Eg: Debian patches, pip, wheel etc ...
sys.real_prefix = sys.base_prefix = os.path.normcase(__BASE_PREFIX__)
sys.real_exec_prefix = sys.base_exec_prefix = os.path.normcase(__BASE_EXEC_PREFIX__)
# This is used in the creation phase when pip is installed in the virtualenv.
# We don't want those damn eggs tripping up our PYTHONPATH whl loading, so we
# instruct setuptools to add the eggs at the end of sys.path (or whatever position
# VIRTUALENV_BOOTSTRAP_ADJUST_EGGINSERT specifies)
if "VIRTUALENV_BOOTSTRAP_ADJUST_EGGINSERT" in os.environ:
sys.__egginsert = int(os.environ["VIRTUALENV_BOOTSTRAP_ADJUST_EGGINSERT"])
# At the point this code is running, the only paths on the sys.path are the
# paths that the interpreter adds itself. These are essentially the locations
# it looks for the various stdlib modules. Since we are inside of a virtual
# environment these will all be relative to the sys.prefix and sys.exec_prefix,
# however we want to change these to be relative to sys.base_prefix and
# sys.base_exec_prefix instead.
new_sys_path = []
for path in sys.path:
# TODO: Is there a better way to determine this?
path = os.path.normcase(os.path.realpath(os.path.abspath(path)))
if path.startswith(sys.prefix):
path = os.path.join(
sys.base_prefix,
path[len(sys.prefix):].lstrip(os.path.sep),
)
elif path.startswith(sys.exec_prefix):
path = os.path.join(
sys.base_exec_prefix,
path[len(sys.exec_prefix):].lstrip(os.path.sep),
)
new_sys_path.append(path)
sys.path = new_sys_path
# We want to empty everything that has already been imported from the
# sys.modules so that any additional imports of these modules will import them
# from the base Python and not from the copies inside of the virtual
# environment. This will ensure that our copies will only be used for
# bootstrapping the virtual environment.
# TODO: is this really necessary? They would be the same modules as the global ones after all ///
dirty_modules = [
# TODO: there might be less packages required but we
# need to extend the integration tests to see exactly what
"__builtin__",
"__main__",
"_frozen_importlib",
"_frozen_importlib_external",
"builtins",
"codecs",
"encodings",
"site",
"sitecustomize",
"symbol",
]
dirty_modules.extend(sys.builtin_module_names)
for key in list(sys.modules):
# We don't want to purge these modules because if we do, then things break
# very badly.
parts = key.split('.')
if parts[0] in dirty_modules:
continue
else:
del sys.modules[key]
# We want to trick the interpreter into thinking that the user specific
# site-packages has been requested to be disabled. We'll do this by mimicking
# that sys.flags.no_user_site has been set, however sys.flags is a read-only
# structure so we'll temporarily replace it with one that has the same values
# except for sys.flags.no_user_site, which will be set to True.
_real_sys_flags = sys.flags
class AttrDict(dict):
def __init__(self, *args, **kwargs):
dict.__init__(self, *args, **kwargs)
def __getattr__(self, name):
return self[name]
sys.flags = AttrDict((k, getattr(sys.flags, k)) for k in dir(sys.flags))
sys.flags["no_user_site"] = True
# We want to import the *real* site module from the base Python. Actually
# attempting to do an import here will just import this module again, so we'll
# just read the real site module and exec it.
with open(__SITE__) as fp:
exec(fp.read())
# Restore the real sys.flags
sys.flags = _real_sys_flags
# On Debian-enized platforms the site.py won't add site-packages on sys.path
# (it will add dist-packages and other crazy stuff).
# Therefore we need to match pip's install location (site-packages)
from distutils import sysconfig
python_lib = sysconfig.get_python_lib()
if python_lib not in sys.path:
addsitedir(python_lib)
# If we're running with the global site-packages enabled, then we'll want to
# go ahead and enable it here so that it comes after the virtual environment's
# site-package.
if global_site_packages is not None:
# Force a re-check of ENABLE_USER_SITE so that we get the "real" value
# instead of our forced off value.
ENABLE_USER_SITE = check_enableusersite()
# Add the actual user site-packages if we're supposed to.
addusersitepackages(None)
# Add the actual global site-packages.
for path in global_site_packages:
addsitedir(path)
# Apply distutils patches. Originally from virtualenv_embedded/distutils-init.py
# Support for per virtualenv distutils.cfg is missing.
def patch(module, name):
original = getattr(module, name)
def decorator(replacement):
def wrapper(*args, **kwargs):
return replacement(original, *args, **kwargs)
wrapper.__doc__ = original.__doc__
setattr(module, name, wrapper)
return decorator
try:
basestring
except NameError:
basestring = str
# Patch build_ext (distutils doesn't know how to get the libs directory
# path on windows - it hardcodes the paths around the patched sys.prefix)
if sys.platform == 'win32':
from distutils.command.build_ext import build_ext
@patch(build_ext, 'finalize_options')
def finalize_options(original, self):
if self.library_dirs is None:
self.library_dirs = []
elif isinstance(self.library_dirs, basestring):
self.library_dirs = self.library_dirs.split(os.pathsep)
self.library_dirs.insert(0, os.path.join(sys.real_prefix, "Libs"))
original(self)
## distutils.sysconfig patches:
from distutils import sysconfig
@patch(sysconfig, 'get_python_inc')
def sysconfig_get_python_inc(original, plat_specific=0, prefix=None):
if prefix is None:
prefix = sys.real_prefix
return original(plat_specific, prefix)
@patch(sysconfig, 'get_python_lib')
def sysconfig_get_python_lib(original, plat_specific=0, standard_lib=0, prefix=None):
if standard_lib and prefix is None:
prefix = sys.real_prefix
return original(plat_specific, standard_lib, prefix)
@patch(sysconfig, 'get_config_vars')
def sysconfig_get_config_vars(original, *args):
real_vars = original(*args)
if sys.platform == 'win32':
lib_dir = os.path.join(sys.real_prefix, "libs")
if isinstance(real_vars, dict) and 'LIBDIR' not in real_vars:
real_vars['LIBDIR'] = lib_dir # asked for all
elif isinstance(real_vars, list) and 'LIBDIR' in args:
real_vars = real_vars + [lib_dir] # asked for list
return real_vars
"""
logger = logging.getLogger(__name__)
class LegacyBuilder(BaseBuilder):
@classmethod
def check_available(cls, python):
# TODO: Do we ever want to make this builder *not* available?
return True
def _locate_module(self, mod, search_paths):
for search_path in search_paths:
pymod = os.path.join(search_path, mod + ".py")
if os.path.exists(pymod):
return pymod
path = os.path.join(search_path, mod)
if os.path.exists(path):
return path
def _path_repr(self, string):
return (
b"'" +
            string.encode(sys.getfilesystemencoding())
            # escape backslashes before quotes so the quote escape survives
            .replace(b"\\", b"\\\\")
            .replace(b"'", b"\\'") +
b"'"
)
def create_virtual_environment(self, destination):
logger.debug("Getting python info: \n%s", pprint.pformat(self._python_info))
# Create our binaries that we'll use to create the virtual environment
bin_dir = os.path.join(destination, self.flavor.bin_dir(self._python_info))
ensure_directory(bin_dir)
for python_bin in self.flavor.python_bins(self._python_info):
copyfile(
self._python_info["sys.executable"],
os.path.join(bin_dir, python_bin),
)
        # Copy extra bins, like some DLLs PyPy likes to have in its bin dir ...
for extra_bin in self.flavor.extra_bins(self._python_info):
for bin_src in set([
os.path.join(self._python_info["sys.prefix"], extra_bin),
os.path.join(os.path.dirname(self._python_info["sys.executable"]), extra_bin),
]):
if os.path.exists(bin_src):
copyfile(
bin_src,
os.path.join(bin_dir, extra_bin),
)
# Create our lib directory, this is going to hold all of the parts of
# the standard library that we need in order to ensure that we can
# successfully bootstrap a Python interpreter.
lib_dir = os.path.join(
destination,
self.flavor.lib_dir(self._python_info)
)
ensure_directory(lib_dir)
# Create our site-packages directory, this is the thing that end users
# really want control over.
site_packages_dir = os.path.join(lib_dir, "site-packages")
ensure_directory(site_packages_dir)
# The site module has a number of required modules that it needs in
# order to be successfully imported, so we'll copy each of those module
# into our virtual environment's lib directory as well. Note that this
# list also includes the os module, but since we've already copied
# that we'll go ahead and omit it.
sys_prefix = self._python_info["sys.prefix"]
lib_dirs = [
path for path in self._python_info["sys.path"]
if path.startswith(sys_prefix)
            # TODO: this has an unhandled edge case: it matches on a partial
            # path prefix (it should only match full path components)
]
for module in self.flavor.bootstrap_modules(self._python_info):
modulepath = self._locate_module(module, lib_dirs)
if modulepath:
copyfile(
modulepath,
os.path.join(
destination,
os.path.relpath(modulepath, sys_prefix)
),
)
osmodulepath = self._locate_module("os", lib_dirs)
if not osmodulepath:
raise RuntimeError("Can't locate os module in any of %s." % lib_dirs)
osmoduledestination = os.path.join(
destination,
os.path.relpath(osmodulepath, sys_prefix)
)
copyfile(osmodulepath, osmoduledestination)
include_dir = self.flavor.include_dir(self._python_info)
src_include_dir = os.path.join(self._python_info["sys.prefix"], include_dir)
if os.path.exists(src_include_dir):
copyfile(src_include_dir, os.path.join(destination, include_dir))
copyfile(src_include_dir, os.path.join(destination, "local", include_dir))
else:
logger.critical("You're missing %r. You many need to install the development packages for this intepreter.",
src_include_dir)
dst = os.path.join(os.path.dirname(osmoduledestination), "site.py")
logger.debug("Writing %s", dst)
with io.open(dst, "wb") as dst_fp:
# Get the data from our source file, and replace our special
# variables with the computed data.
data = SITE
data = data.replace(b"__PREFIX__", self._path_repr(destination))
data = data.replace(b"__EXEC_PREFIX__", self._path_repr(destination))
data = data.replace(
b"__BASE_PREFIX__",
self._path_repr(self._python_info["sys.prefix"]),
)
data = data.replace(
b"__BASE_EXEC_PREFIX__", self._path_repr(self._python_info["sys.exec_prefix"]),
)
data = data.replace(b"__SITE__", self._path_repr(self._python_info["site.py"]))
data = data.replace(
b"__GLOBAL_SITE_PACKAGES__",
(
b"[" +
b", ".join(
self._path_repr(path) for path in self._python_info["site.getsitepackages"]
) +
b"]"
) if self.system_site_packages else b"None",
)
if self.verbose:
dst_fp.write(b"""# -*- encoding: utf-8 -*-
import sys
print("DEBUG: sys.modules:")
for m in sorted(sys.modules):
print("DEBUG: %s - %s" % (m, sys.modules[m]))
print("DEBUG: sys.path:")
for p in sys.path:
print("DEBUG: %s" % p)
""")
# Write the final site.py file to our lib directory
dst_fp.write(data)
if self.verbose:
dst_fp.write(b"""
print("DEBUG: sys.path after site.py run:")
for p in sys.path:
print("DEBUG: %s" % p)
""")
| {
"repo_name": "ionelmc/virtualenv",
"path": "virtualenv/builders/legacy.py",
"copies": "1",
"size": "14244",
"license": "mit",
"hash": 9073391720060456000,
"line_mean": 37.7065217391,
"line_max": 120,
"alpha_frac": 0.6378124122,
"autogenerated": false,
"ratio": 3.8372844827586206,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.997233917918046,
"avg_score": 0.0005515431556321323,
"num_lines": 368
} |
from __future__ import absolute_import, division, print_function
import io
import os
import sys
from setuptools import setup, find_packages
from setuptools.command.test import test as TestCommand
# Version is defined in biostructmap/__init__.py
__version__ = "Undefined"
with open('biostructmap/__init__.py') as init_file:
    for line in init_file:
        if line.startswith('__version__'):
            exec(line.strip())
here = os.path.abspath(os.path.dirname(__file__))
def read(*filenames, **kwargs):
encoding = kwargs.get('encoding', 'utf-8')
sep = kwargs.get('sep', '\n')
buf = []
for filename in filenames:
with io.open(filename, encoding=encoding) as f:
buf.append(f.read())
return sep.join(buf)
LONG_DESCRIPTION = read('README.rst', 'CHANGES.txt')
class PyTest(TestCommand):
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
import pytest
errcode = pytest.main(self.test_args)
sys.exit(errcode)
setup(
name='biostructmap',
version=__version__,
url='https://github.com/andrewguy/biostructmap',
download_url='https://github.com/andrewguy/biostructmap/archive/{0}.tar.gz'.format(__version__),
author='Andrew Guy',
tests_require=['pytest'],
setup_requires=['numpy'],
install_requires=['Biopython>=1.66',
'DendroPy>=4.0.3',
'numpy'
],
cmdclass={'test': PyTest},
author_email='andrewjguy42@gmail.com',
description='A package for mapping biological data onto protein PDB structures',
long_description=LONG_DESCRIPTION,
packages=['biostructmap'],
include_package_data=True,
platforms='any',
test_suite='tests.test_biostructmap',
classifiers=[
'Programming Language :: Python',
'Development Status :: 4 - Beta',
'Natural Language :: English',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Topic :: Scientific/Engineering :: Bio-Informatics',
],
extras_require={
'testing': ['pytest'],
}
)
| {
"repo_name": "andrewguy/biostructmap",
"path": "setup.py",
"copies": "1",
"size": "2221",
"license": "mit",
"hash": -8333615552728129000,
"line_mean": 30.2816901408,
"line_max": 100,
"alpha_frac": 0.6240432238,
"autogenerated": false,
"ratio": 3.8030821917808217,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49271254155808214,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import io
import os.path
import tarfile
import tempfile
from requests.utils import urlparse
import appr.pack as packager
from appr.client import ApprClient
class FormatBase(object):
media_type = NotImplementedError
target = NotImplementedError
kub_class = NotImplementedError
manifest_file = []
appr_client = ApprClient
def __init__(self, name, version=None, endpoint=None, ssl_verify=True, **kwargs):
self._deploy_name = name
self._deploy_version = version or {"key": "version", "value": 'default'}
self.endpoint = endpoint
self._registry = self.appr_client(endpoint=self.endpoint, requests_verify=ssl_verify)
self._package = None
self._manifest = None
@property
def package(self):
if self._package is None:
result = self._fetch_package()
self._package = packager.ApprPackage(result, b64_encoded=True)
return self._package
def _create_manifest(self):
raise NotImplementedError
@property
def manifest(self):
if self._manifest is None:
self._manifest = self._create_manifest()
return self._manifest
def __unicode__(self):
return ("(<{class_name}({name}=={version})>".format(class_name=self.__class__.__name__,
name=self.name, version=self.version))
def __str__(self):
return self.__unicode__().encode('utf-8')
def __repr__(self):
return self.__str__()
@property
def author(self):
pass
@property
def version(self):
return self.manifest.version
@property
def description(self):
pass
@property
def name(self):
return self.manifest.name
@property
def variables(self):
pass
def _fetch_package(self):
parse = urlparse(self._deploy_name)
if parse.scheme in ["http", "https"]:
# @TODO
pass
elif parse.scheme == "file":
parts = parse.path.split("/")
_, ext = os.path.splitext(parts[-1])
if ext == ".gz":
filepath = parse.path
else:
filepath = tempfile.NamedTemporaryFile().name
packager.pack_kub(filepath)
with open(filepath, "rb") as tarf:
return tarf.read()
else:
return self._registry.pull_json(self._deploy_name, self._deploy_version,
self.media_type)['blob']
def make_tarfile(self, source_dir):
output = io.BytesIO()
with tarfile.open(fileobj=output, mode="w:gz") as tar:
tar.add(source_dir, arcname=os.path.basename(source_dir))
return output
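# --- illustrative sketch (not part of the original module) ---
# make_tarfile() only touches its argument, so it can be exercised without a
# fully initialised FormatBase (whose __init__ needs a registry endpoint); an
# instance created with __new__ is enough for this demonstration.
def _demo_make_tarfile():
    src = tempfile.mkdtemp()
    with open(os.path.join(src, "manifest.yaml"), "w") as fh:
        fh.write("name: demo\n")
    fb = FormatBase.__new__(FormatBase)
    buf = fb.make_tarfile(src)
    buf.seek(0)
    with tarfile.open(fileobj=buf, mode="r:gz") as tar:
        print(tar.getnames())  # the temp dir name, plus manifest.yaml inside it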
| {
"repo_name": "app-registry/appr",
"path": "appr/formats/base.py",
"copies": "2",
"size": "2847",
"license": "apache-2.0",
"hash": 8255804244835634000,
"line_mean": 28.0510204082,
"line_max": 98,
"alpha_frac": 0.5753424658,
"autogenerated": false,
"ratio": 4.3071104387291985,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5882452904529198,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import ipaddress
import pytest
from cryptography.hazmat.backends import default_backend
from cryptography.x509 import load_pem_x509_certificate
from service_identity import SubjectAltNameWarning
from service_identity._common import (
DNS_ID,
DNSPattern,
IPAddress_ID,
IPAddressPattern,
URIPattern,
)
from service_identity.cryptography import (
extract_ids,
verify_certificate_hostname,
verify_certificate_ip_address,
)
from service_identity.exceptions import (
DNSMismatch,
IPAddressMismatch,
VerificationError,
)
from .util import PEM_CN_ONLY, PEM_DNS_ONLY, PEM_EVERYTHING, PEM_OTHER_NAME
backend = default_backend()
X509_DNS_ONLY = load_pem_x509_certificate(PEM_DNS_ONLY, backend)
X509_CN_ONLY = load_pem_x509_certificate(PEM_CN_ONLY, backend)
X509_OTHER_NAME = load_pem_x509_certificate(PEM_OTHER_NAME, backend)
CERT_EVERYTHING = load_pem_x509_certificate(PEM_EVERYTHING, backend)
class TestPublicAPI(object):
def test_certificate_verify_hostname_ok(self):
"""
verify_certificate_hostname succeeds if the hostnames match.
"""
verify_certificate_hostname(X509_DNS_ONLY, u"twistedmatrix.com")
def test_certificate_verify_hostname_fail(self):
"""
verify_certificate_hostname fails if the hostnames don't match and
provides the user with helpful information.
"""
with pytest.raises(VerificationError) as ei:
verify_certificate_hostname(X509_DNS_ONLY, u"google.com")
assert [
DNSMismatch(mismatched_id=DNS_ID(u"google.com"))
] == ei.value.errors
@pytest.mark.parametrize("ip", [u"1.1.1.1", u"::1"])
def test_verify_certificate_ip_address_ok(self, ip):
"""
verify_certificate_ip_address succeeds if the addresses match. Works
both with IPv4 and IPv6.
"""
verify_certificate_ip_address(CERT_EVERYTHING, ip)
@pytest.mark.parametrize("ip", [u"1.1.1.2", u"::2"])
def test_verify_ip_address_fail(self, ip):
"""
verify_ip_address fails if the addresses don't match and provides the
user with helpful information. Works both with IPv4 and IPv6.
"""
with pytest.raises(VerificationError) as ei:
verify_certificate_ip_address(CERT_EVERYTHING, ip)
assert [
IPAddressMismatch(mismatched_id=IPAddress_ID(ip))
] == ei.value.errors
class TestExtractIDs(object):
def test_dns(self):
"""
Returns the correct DNSPattern from a certificate.
"""
rv = extract_ids(X509_DNS_ONLY)
assert [
DNSPattern(b"www.twistedmatrix.com"),
DNSPattern(b"twistedmatrix.com"),
] == rv
def test_cn_ids_are_used_as_fallback(self):
"""
CNs are returned as DNSPattern if no other IDs are present
and a warning is raised.
"""
with pytest.warns(SubjectAltNameWarning):
rv = extract_ids(X509_CN_ONLY)
assert [DNSPattern(b"www.microsoft.com")] == rv
def test_uri(self):
"""
Returns the correct URIPattern from a certificate.
"""
rv = extract_ids(X509_OTHER_NAME)
assert [URIPattern(b"http://example.com/")] == [
id for id in rv if isinstance(id, URIPattern)
]
def test_ip(self):
"""
Returns IP patterns.
"""
rv = extract_ids(CERT_EVERYTHING)
assert [
DNSPattern(pattern=b"service.identity.invalid"),
DNSPattern(pattern=b"*.wildcard.service.identity.invalid"),
DNSPattern(pattern=b"service.identity.invalid"),
DNSPattern(pattern=b"single.service.identity.invalid"),
IPAddressPattern(pattern=ipaddress.IPv4Address(u"1.1.1.1")),
IPAddressPattern(pattern=ipaddress.IPv6Address(u"::1")),
IPAddressPattern(pattern=ipaddress.IPv4Address(u"2.2.2.2")),
IPAddressPattern(pattern=ipaddress.IPv6Address(u"2a00:1c38::53")),
] == rv
| {
"repo_name": "pyca/service_identity",
"path": "tests/test_cryptography.py",
"copies": "1",
"size": "4109",
"license": "mit",
"hash": 5848667481885968000,
"line_mean": 32.1370967742,
"line_max": 78,
"alpha_frac": 0.6454125091,
"autogenerated": false,
"ratio": 3.7286751361161525,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48740876452161525,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import itertools
import copy
import numpy as np
from horton import *
from .fields_generate import Fields
from .function_fit import poly_fit,rat_fit, solve
def finitefield_ham(ham, lf, obasis, f_order, field, xyz=None):
"""
    This function returns the Hamiltonian with the multipole moment term for an external field
**Arguments**
lf
LinalgFactory object from horton
obasis
Gaussian basis set object from horton
f_order
order of field
field
field
xyz
center of multipole moment, it's [0. 0. 0.] by default
"""
if not xyz:
xyz = np.zeros(3)
mm = DenseTwoIndex(obasis.nbasis)
for j, perm in enumerate(itertools.combinations_with_replacement((0, 1, 2), f_order)):
mask = np.zeros(3, dtype=np.int64)
for i in perm:
mask[i] += 1
mm_tmp = obasis.compute_multipolemoment(mask, xyz, lf)
mm_tmp._array = mm_tmp._array*field[j]
mm._array += mm_tmp._array
ham.terms.append(RTwoIndexTerm(mm,'mm'))
return ham
def finitefield_energy(ham, lf, olp, orb, occ_model, method='hf'):
"""
    This function returns the energy with the multipole moment term in an external field
**Arguments**
olp
overlap matrix from horton
orb
        alpha orbitals from horton; note it is also named exp_alpha. Make sure the
        variable name stays consistent when you use it.
occ_model
horton object to decide how to occupy the orbitals
"""
    if method == 'hf':
        scf_solver = PlainSCFSolver(1e-6)
        scf_solver(ham, lf, olp, occ_model, orb)
    else:
        raise NotImplementedError("Only method='hf' is currently supported.")
return ham.compute_energy()
def model_finitefield_ham(ham, lf, obasis, olp, orb, occ_model, mol, f_order, p_order, method='hf'):
"""
    This function returns a set of fields and the corresponding energies for function fitting
**Arguments**
f_order
        a list of field orders
p_order
polarizability order
"""
ham_backup = ham
energys = []
obj = Fields(mol)
fields = obj.fields(f_order, mol)
    for field in fields:
        n0 = 0
        n1 = 0
        for i in f_order:
            n1 += (i + 1) * (i + 2) // 2  # number of Cartesian components of order i
            ham = copy.deepcopy(ham_backup)
            field_i = field[n0:n1]
            ffham = finitefield_ham(ham, lf, obasis, i, field_i)
            n0 = n1
energys.append(finitefield_energy(ffham, lf, olp, orb, occ_model, method=method))
return solve(fields, energys, f_order, [3,4], p_order)
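# --- illustrative sketch (not part of the original module) ---
# How finitefield_ham above enumerates the Cartesian multipole components for
# a given field order, shown without any horton objects.
def _demo_multipole_masks(f_order=2):
    for j, perm in enumerate(itertools.combinations_with_replacement((0, 1, 2), f_order)):
        mask = np.zeros(3, dtype=np.int64)
        for i in perm:
            mask[i] += 1
        print(j, perm, mask)  # e.g. 0 (0, 0) [2 0 0] -> the xx component
    # order 2 yields six components (xx, xy, xz, yy, yz, zz), i.e. (n+1)(n+2)/2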
| {
"repo_name": "fhqgfss/polar",
"path": "polar/data_generate.py",
"copies": "1",
"size": "2615",
"license": "mit",
"hash": 6013025386175439000,
"line_mean": 24.1442307692,
"line_max": 100,
"alpha_frac": 0.6145315488,
"autogenerated": false,
"ratio": 3.48202396804261,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45965555168426103,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import itertools
import math
from glob import glob
import heapq
from collections import Iterable, Iterator
from toolz import (merge, concat, frequencies, merge_with, take, curry, reduce,
join, reduceby, compose, second, valmap, count, map, partition_all,
filter, pluck)
try:
from cytoolz import (curry, frequencies, merge_with, join, reduceby,
compose, second, count, pluck)
except ImportError:
pass
from ..multiprocessing import get as mpget
from ..core import istask, get_dependencies, reverse_dict
from ..optimize import fuse
names = ('bag-%d' % i for i in itertools.count(1))
load_names = ('load-%d' % i for i in itertools.count(1))
def lazify_task(task, start=True):
"""
Given a task, remove unnecessary calls to ``list``
Example
-------
>>> task = (sum, (list, (map, inc, [1, 2, 3]))) # doctest: +SKIP
>>> lazify_task(task) # doctest: +SKIP
(sum, (map, inc, [1, 2, 3]))
"""
if not istask(task):
return task
head, tail = task[0], task[1:]
if not start and head is list:
task = task[1]
return lazify_task(*tail, start=False)
else:
return (head,) + tuple([lazify_task(arg, False) for arg in tail])
def lazify(dsk):
"""
Remove unnecessary calls to ``list`` in tasks
See Also:
``dask.bag.core.lazify_task``
"""
return valmap(lazify_task, dsk)
get = curry(mpget, optimizations=[fuse, lazify])
def list2(seq):
""" Another list function that won't be removed by lazify """
return list(seq)
class Item(object):
def __init__(self, dsk, key, get=get):
self.dask = dsk
self.key = key
self.get = get
def compute(self):
return self.get(self.dask, self.key)
__int__ = __float__ = __complex__ = __bool__ = compute
class Bag(object):
""" Unordered collection with repeats
    Computed in partitions with dask
>>> dsk = {('x', 0): (range, 5),
... ('x', 1): (range, 5),
... ('x', 2): (range, 5)}
>>> b = Bag(dsk, 'x', npartitions=3)
>>> sorted(b.map(lambda x: x * 10)) # doctest: +SKIP
[0, 0, 0, 10, 10, 10, 20, 20, 20, 30, 30, 30, 40, 40, 40]
>>> int(b.fold(lambda x, y: x + y)) # doctest: +SKIP
30
"""
def __init__(self, dsk, name, npartitions, get=get):
self.dask = dsk
self.name = name
self.npartitions = npartitions
self.get = get
@classmethod
def from_filenames(cls, filenames):
""" Create dask by loading in lines from many files
Provide list of filenames
>>> b = Bag.from_filenames(['myfile.1.txt', 'myfile.2.txt']) # doctest: +SKIP
Or a globstring
>>> b = Bag.from_filenames('myfiles.*.txt') # doctest: +SKIP
"""
if isinstance(filenames, str):
filenames = sorted(glob(filenames))
d = dict((('load', i), (list, (open, fn)))
for i, fn in enumerate(filenames))
return Bag(d, 'load', len(d))
@classmethod
def from_sequence(cls, seq, partition_size=None, npartitions=None):
""" Create dask from Python sequence
This sequence should be relatively small in memory. Dask Bag works
best when it handles loading your data itself. Commonly we load a
sequence of filenames into a Bag and then use ``.map`` to open them.
Parameters
----------
seq: Iterable
A sequence of elements to put into the dask
partition_size: int (optional)
The length of each partition
npartitions: int (optional)
The number of desired partitions
It is best to provide either ``partition_size`` or ``npartitions``
(though not both.)
Example
-------
>>> b = Bag.from_sequence(['Alice', 'Bob', 'Chuck'], partition_size=2)
"""
seq = list(seq)
if npartitions and not partition_size:
partition_size = int(math.ceil(len(seq) / npartitions))
if npartitions is None and partition_size is None:
if len(seq) < 100:
partition_size = 1
else:
partition_size = int(len(seq) / 100)
parts = list(partition_all(partition_size, seq))
name = next(load_names)
d = dict(((name, i), part) for i, part in enumerate(parts))
return Bag(d, name, len(d))
def map(self, func):
name = next(names)
dsk = dict(((name, i), (list, (map, func, (self.name, i))))
for i in range(self.npartitions))
return Bag(merge(self.dask, dsk), name, self.npartitions)
def filter(self, predicate):
name = next(names)
dsk = dict(((name, i), (list, (filter, predicate, (self.name, i))))
for i in range(self.npartitions))
return Bag(merge(self.dask, dsk), name, self.npartitions)
def map_partitions(self, func):
name = next(names)
dsk = dict(((name, i), (func, (self.name, i)))
for i in range(self.npartitions))
return Bag(merge(self.dask, dsk), name, self.npartitions)
def pluck(self, key):
name = next(names)
if isinstance(key, list):
key = (list2, key)
dsk = dict(((name, i), (list, (pluck, key, (self.name, i))))
for i in range(self.npartitions))
return Bag(merge(self.dask, dsk), name, self.npartitions)
def fold(self, binop, combine=None, initial=None):
a = next(names)
b = next(names)
if initial:
dsk = dict(((a, i), (reduce, binop, (self.name, i), initial))
for i in range(self.npartitions))
else:
dsk = dict(((a, i), (reduce, binop, (self.name, i)))
for i in range(self.npartitions))
dsk2 = {b: (reduce, combine or binop, list(dsk.keys()))}
return Item(merge(self.dask, dsk, dsk2), b)
def frequencies(self):
a = next(names)
b = next(names)
dsk = dict(((a, i), (frequencies, (self.name, i)))
for i in range(self.npartitions))
dsk2 = {(b, 0): (dictitems,
(merge_with, sum, list(sorted(dsk.keys()))))}
return Bag(merge(self.dask, dsk, dsk2), b, 1)
def topk(self, k, key=None):
a = next(names)
b = next(names)
if key:
topk = curry(heapq.nlargest, key=key)
else:
topk = heapq.nlargest
dsk = dict(((a, i), (list, (topk, k, (self.name, i))))
for i in range(self.npartitions))
dsk2 = {(b, 0): (list, (topk, k, (concat, list(dsk.keys()))))}
return Bag(merge(self.dask, dsk, dsk2), b, 1)
def _reduction(self, perpartition, aggregate):
a = next(names)
b = next(names)
dsk = dict(((a, i), (perpartition, (self.name, i)))
for i in range(self.npartitions))
dsk2 = {b: (aggregate, list(dsk.keys()))}
return Item(merge(self.dask, dsk, dsk2), b)
def sum(self):
return self._reduction(sum, sum)
def max(self):
return self._reduction(max, max)
def min(self):
return self._reduction(min, min)
def any(self):
return self._reduction(any, any)
def all(self):
return self._reduction(all, all)
def count(self):
return self._reduction(count, sum)
def mean(self):
def chunk(seq):
total, n = 0.0, 0
for x in seq:
total += x
n += 1
return total, n
def agg(x):
totals, counts = list(zip(*x))
return 1.0 * sum(totals) / sum(counts)
return self._reduction(chunk, agg)
def var(self, ddof=0):
def chunk(seq):
squares, total, n = 0.0, 0.0, 0
for x in seq:
squares += x**2
total += x
n += 1
return squares, total, n
def agg(x):
squares, totals, counts = list(zip(*x))
x2, x, n = float(sum(squares)), float(sum(totals)), sum(counts)
result = (x2 / n) - (x / n)**2
return result * n / (n - ddof)
return self._reduction(chunk, agg)
def std(self, ddof=0):
return math.sqrt(self.var(ddof=ddof))
def join(self, other, on_self, on_other=None):
assert isinstance(other, Iterable)
assert not isinstance(other, Bag)
if on_other is None:
on_other = on_self
name = next(names)
dsk = dict(((name, i), (list, (join, on_other, other,
on_self, (self.name, i))))
for i in range(self.npartitions))
return Bag(merge(self.dask, dsk), name, self.npartitions)
def product(self, other):
""" Cartesian product between two bags """
assert isinstance(other, Bag)
name = next(names)
n, m = self.npartitions, other.npartitions
dsk = dict(((name, i*m + j),
(list, (itertools.product, (self.name, i),
(other.name, j))))
for i in range(n) for j in range(m))
return Bag(merge(self.dask, other.dask, dsk), name, n*m)
def foldby(self, key, binop, initial=None, combine=None,
combine_initial=None):
a = next(names)
b = next(names)
if combine is None:
combine = binop
if initial:
dsk = dict(((a, i),
(reduceby, key, binop, (self.name, i), initial))
for i in range(self.npartitions))
else:
dsk = dict(((a, i),
(reduceby, key, binop, (self.name, i)))
for i in range(self.npartitions))
combine2 = lambda acc, x: combine(acc, x[1])
if combine_initial:
dsk2 = {(b, 0): (dictitems,
(reduceby,
0, combine2,
(concat, (map, dictitems, list(dsk.keys()))),
combine_initial))}
else:
dsk2 = {(b, 0): (dictitems,
(merge_with,
(curry, reduce, combine),
list(dsk.keys())))}
return Bag(merge(self.dask, dsk, dsk2), b, 1)
def take(self, k):
name = next(names)
return Bag(merge(self.dask, {(name, 0): (list, (take, k, (self.name,
0)))}), name, 1)
def _keys(self):
return [(self.name, i) for i in range(self.npartitions)]
def __iter__(self):
results = self.get(self.dask, self._keys())
if isinstance(results[0], Iterable):
results = concat(results)
if not isinstance(results, Iterator):
results = iter(results)
return results
def dictitems(d):
""" A pickleable version of dict.items """
return list(d.items())
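# --- illustrative sketch (not part of the original module) ---
# The chunk/agg pair used by Bag.var above, run on plain Python lists to show
# how per-partition sums combine into a single variance.
def _demo_var_chunk_agg(ddof=0):
    partitions = [[1, 2, 3], [4, 5], [6, 7, 8, 9]]

    def chunk(seq):
        return sum(x ** 2 for x in seq), sum(seq), len(seq)

    def agg(parts):
        squares, totals, counts = zip(*parts)
        x2, x, n = float(sum(squares)), float(sum(totals)), sum(counts)
        return ((x2 / n) - (x / n) ** 2) * n / (n - ddof)

    print(agg([chunk(part) for part in partitions]))  # 6.666..., the population variance of 1..9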
| {
"repo_name": "PeterDSteinberg/dask",
"path": "dask/bag/core.py",
"copies": "1",
"size": "11192",
"license": "bsd-3-clause",
"hash": -499842993702536770,
"line_mean": 31.6297376093,
"line_max": 86,
"alpha_frac": 0.5229628306,
"autogenerated": false,
"ratio": 3.607994842037395,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4630957672637395,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import itertools
import math
import distributed
import numpy as np
from time import time
from toolz import concat, take
from .clients import DaskClient, SerialClient
from .stencil import RightHandedSimplexStencil
class Point(object):
__slots__ = 'point', 'halvings', 'parent', 'is_accepted', 'start_time', 'stop_time', 'result'
def __init__(self, point, halvings):
self.point = point
self.halvings = halvings
self.is_accepted = False
self.stop_time = None
def __hash__(self):
return hash(self.point.tostring())
def __eq__(self, other):
return np.array_equal(self.point, other.point)
def __repr__(self):
return type(self).__name__ + repr(self.point)[len('array'):]
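# Added note: Point hashes and compares on its coordinate array alone, so
# two Point objects at the same coordinates are treated as the same key in
# ``results`` and in sets even when their ``halvings`` differ. That is what
# lets the search skip re-submitting trial points it has already evaluated.
# A minimal illustration (the values below are made up):
def _point_identity_example():
    a = Point(np.array([1.0, 2.0]), 0)
    b = Point(np.array([1.0, 2.0]), 3)
    seen = {a: 'first'}
    assert b in seen and hash(a) == hash(b)
    return seen[b]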
def randomize_chunk(i, it):
L = list(take(i, it))
np.random.shuffle(L)
return L
def randomize_stencil(dims, it):
return concat(randomize_chunk(i, it) for i in itertools.count(2*dims, dims))
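# Added sketch: randomize_stencil consumes the stencil in growing chunks
# (2*dims, then 3*dims, 4*dims, ...) and shuffles each chunk, so the leading
# stencil points still tend to be tried early on average while their exact
# order is decorrelated. The helper below shows the chunk boundaries on a
# plain counter instead of a real stencil; it is illustrative only.
def _randomized_chunks_example(dims=2, n_chunks=3):
    it = itertools.count()
    sizes = itertools.islice(itertools.count(2 * dims, dims), n_chunks)
    return [randomize_chunk(size, it) for size in sizes]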
def run_batch(func, points, args):
return tuple(func(point, *args) for point in points)
def search(func, x0, stepsize, client=None, args=(), max_queue_size=None,
min_queue_size=None, min_new_submit=0, randomize=True,
max_stencil_size=None, stopratio=0.01, max_tasks=None, max_time=None,
integer_dimensions=None, batchsize=None, vectorize=False):
""" Perform an asynchronous pattern search to minimize a function.
A pattern of trial points is created around the current best point.
No derivatives are calculated. Instead, this pattern shrinks as the
algorithm converges. Tasks and results are submitted and collected
fully asynchronously, and the current best point is updated as soon
as possible. This algorithm should be able to scale to use any
number of cores, although there are practical limitations such as
scheduler overhead and memory usage. For example, using 100,000
cores to minimize a 100-D objective function should work just fine.
Parameters
----------
func : callable
The objective function to be minimized. Must be in the form
``func(x, *args)`` where ``x`` is a 1-D array and ``args`` is
a tuple of extra arguments passed to the objective function.
x0 : ndarray
1-D array; the initial guess.
stepsize : ndarray
1-D array; the initial step sizes for each dimension. This may
be repeatedly halved or doubled as the algorithm proceeds. It
is best to choose step sizes larger than the features you want
the algorithm to step over.
client : dask.distributed.Client, optional
Typically, a client to a ``dask.distributed`` scheduler should
be passed. If a client is not given, then the algorithm will
run serially in the current thread, and the default queue size
will be ``3 * len(x0)``.
args : tuple, optional
Extra arguments passed to the objective function.
max_queue_size : int or None, optional
Maximum number of active tasks to have submitted to the client.
Default is the greater of ``3 * len(x0)`` or the total number
        of threads plus the total number of worker processes of the
client's cluster. This default is chosen to maximize occupancy
of available cores, but, in general, ``max_queue_size`` does
not need to be related to compute resources at all. Choosing a
larger ``max_queue_size`` is the best way to improve robustness
of the algorithm.
min_queue_size : int or None, optional
Minimum number of active tasks to have submitted to the client.
Default is ``max_queue_size // 2``.
min_new_submit : int, optional
The minimum number of trial points to submit after a new best
point has been found before accepting an even better point.
This may help when there are multiple minima being explored.
randomize : bool, optional
Whether to randomize the order of trial points (default True).
    max_stencil_size : int or None, optional
The maximum size of the stencil used to create the pattern of
trial points around the current best point. Default unlimited.
stopratio : float, optional
Termination condition: stop after the step size has been reduced
by this amount. Must be between 0 and 1. Default is 0.01.
max_tasks : int or None, optional
Termination condition: stop after this many tasks have been
completed. Default unlimited.
max_time : float or None, optional
Termination condition: stop submitting new tasks after this many
seconds have passed. Default unlimited.
integer_dimensions : array-like or None, optional
1-D array; specify the indices of integer dimensions.
batchsize : int or None, optional
Evaluate this many trial points in a single task. This is useful
when the objective function is fast or vectorized.
vectorize : bool
Set to True if the objective function is vectorized. This means
it accepts a 2-D array of points and returns a 1-D array of
results. This can dramatically improve performance.
``batchsize`` must be given if ``vectorize`` is True.
Returns
-------
    best_point : Point
        The optimization result. ``best_point.point`` is the ndarray.
    results : dict
        All evaluated points and their scores.
"""
# bound=None, low_memory_stencil=False
if max_queue_size is None:
max_queue_size = 3 * len(x0)
if batchsize is not None:
max_queue_size = max_queue_size // batchsize + 1
if client is not None and hasattr(client, 'ncores'):
ncores = client.ncores()
max_queue_size = max(max_queue_size, sum(ncores.values()) + len(ncores))
if min_queue_size is None:
min_queue_size = max(1, max_queue_size // 2)
if max_stencil_size is None:
max_stencil_size = 1e9
if vectorize and batchsize is None:
raise ValueError('batchsize argument must be given if vectorize is True')
x0 = np.array(x0)
stepsize = np.array(stepsize)
dims = len(stepsize)
max_halvings = math.frexp(1 / stopratio)[1]
stencil = RightHandedSimplexStencil(dims, max_halvings)
gridsize = stepsize / 2.**max_halvings
if integer_dimensions is not None:
integer_dimensions = np.array(integer_dimensions)
        int_dims = np.zeros(len(x0), dtype=bool)
int_dims[integer_dimensions] = 1
x0[int_dims] = np.round(x0[int_dims])
def to_grid(x):
return np.round(x / gridsize) * gridsize
orientation = np.ones(dims)
cur_point = Point(to_grid(x0), -1)
cur_point.start_time = time()
cur_point.parent = cur_point
cur_cost = result = np.inf
is_contraction = True
new_point = None
if max_time is not None:
end_time = time() + max_time
current_batch = []
running = {}
processing = []
results = {}
contract_conditions = set()
next_point = None
next_cost = None
if client is None:
client = SerialClient()
elif isinstance(client, distributed.Client):
client = DaskClient(client)
if batchsize is None:
def submit_point(point):
results[point] = None
future = client.submit(func, point.point, *args)
running[future] = point
else:
def submit_point(point):
results[point] = None
current_batch.append(point)
if len(current_batch) >= batchsize:
points = (p.point for p in current_batch)
if vectorize:
future = client.submit(func, np.stack(points), *args)
else:
future = client.submit(run_batch, func, tuple(points), args)
running[future] = tuple(current_batch)
current_batch[:] = []
submit_point(cur_point)
is_finished = False
while not is_finished or running or next_point is not None or new_point is not None:
if max_time is not None and time() > end_time:
is_finished = True
# Initialize new point
if new_point is not None or is_contraction:
if is_contraction:
is_contraction = False
if cur_point.stop_time is None:
cur_point.stop_time = time()
new_point = Point(cur_point.point, cur_point.halvings + 1)
new_point.parent = cur_point
new_point.is_accepted = True
new_point.result = cur_cost
new_cost = cur_cost
new_point.start_time = time()
cur_point = new_point
cur_cost = new_cost
new_point = None
new_cost = None
cur_stepsize = to_grid(orientation * stepsize / 2.**cur_point.halvings)
if integer_dimensions is not None:
# Change the minimum step size for integers to 1
cur_stepsize[int_dims & (cur_stepsize < 0) & (cur_stepsize > -1)] = -1
cur_stepsize[int_dims & (cur_stepsize > 0) & (cur_stepsize < 1)] = 1
cur_added = 0
contract_conditions.clear()
it = stencil.generate_stencil_points()
if randomize:
it = randomize_stencil(dims, it)
stencil_points = enumerate(it, 1)
stencil_index = 0
if cur_point.halvings >= max_halvings:
is_finished = True
# Fill task queue with trial points while waiting for results
if not is_finished:
while (
len(running) < max_queue_size
and stencil_index < max_stencil_size
and (
len(running) < min_queue_size
or cur_added < min_new_submit
or next_point is None and not client.has_results()
)
):
try:
stencil_index, step = next(stencil_points)
except StopIteration:
if stencil_index < 2 * dims:
raise
                    # else: the stencil ran out past the first 2*dims
                    # points (a warning could be issued here); record the
                    # exhausted index as max_stencil_size so the rest of
                    # the loop treats the stencil as fully enumerated.
                    stencil_index = max_stencil_size = stencil_index
break
if (
cur_added >= min_new_submit
and stencil_index > 2 * dims
and not contract_conditions
):
is_contraction = True
break
halvings = step.halvings + cur_point.halvings
dx = step.point * cur_stepsize
if integer_dimensions is not None:
# Round integer steps to the nearest integer away from zero
dx_ints = dx[int_dims]
dx[int_dims] = np.copysign(np.ceil(np.abs(dx_ints)), dx_ints)
trial_point = to_grid(cur_point.point + dx)
trial_point[int_dims] = np.round(trial_point[int_dims])
# Don't reduce the stepsize via the stencil if step is only ints
if step.halvings > 0 and (dx[~int_dims] != 0).sum() == 0:
halvings = cur_point.halvings
else:
trial_point = to_grid(cur_point.point + dx)
if halvings > max_halvings:
continue
# TODO: check boundary constraints here
# if check_feasible is not None and not check_feasible(trial_point):
# continue
trial_point = Point(trial_point, halvings)
has_result = results.get(trial_point, False)
if stencil_index <= 2 * dims and (has_result is False or has_result is None):
contract_conditions.add(trial_point)
if has_result is False:
trial_point.parent = cur_point
trial_point.start_time = time()
submit_point(trial_point)
cur_added += 1
if max_tasks is not None and len(results) // (batchsize or 1) >= max_tasks:
is_finished = True
break
if is_contraction:
continue
# Collect all completed tasks, or wait for one if nothing else to do
if running:
block = (
len(running) >= max_queue_size
or (
next_point is None
and (is_finished or stencil_index >= max_stencil_size)
)
)
for future, result in client.next_batch(block=block):
stop_time = time()
points = running.pop(future)
if batchsize is None:
points = [points]
result = [result]
for point, cost in zip(points, result):
point.stop_time = stop_time
if next_point is None:
next_point = point
next_cost = cost
elif cost < next_cost:
processing.append((next_point, next_cost))
next_point = point
next_cost = cost
else:
processing.append((point, cost))
# Process all results
# Be greedy: the new point will be the result with the lowest cost.
# It's possible one could want a different policy to better explore
# around multiple minima.
if next_point is not None and (cur_added >= min_new_submit or stencil_index >= max_stencil_size or is_finished):
results[next_point] = next_cost
next_point.result = next_cost
contract_conditions.discard(next_point)
if next_cost < cur_cost:
next_point.is_accepted = True
# Orient the asymmetric stencil towards the expected direction
# of descent based on the old and new points and their parents.
# The stencil prefers negative directions, so this is the correct sign.
diff = (
(next_point.point - next_point.parent.point)
+ (cur_point.point - cur_point.parent.point)
)
orientation = np.where(diff, np.copysign(orientation, diff), orientation)
new_point = next_point
new_cost = next_cost
for point, result in processing:
results[point] = result
point.result = result
contract_conditions.discard(point)
if new_point is None and not contract_conditions and stencil_index > 2 * dims:
is_contraction = True
next_point = None
next_cost = None
processing[:] = []
elif next_point is None and stencil_index >= max_stencil_size and not running and not is_finished:
# Nothing running, nothing to process, and nothing to submit, so contract
is_contraction = True
for point in current_batch:
results.pop(point)
return cur_point, results
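# Added usage sketch (not part of the original module): minimizing a simple
# quadratic with the serial fallback, i.e. without a dask.distributed
# client, relying on the behaviour described in the docstring above. The
# objective, start point and step sizes are arbitrary.
def _example_search():
    def sphere(x):
        return float((x ** 2).sum())
    best, results = search(sphere,
                           x0=np.array([3.0, -4.0]),
                           stepsize=np.array([1.0, 1.0]),
                           stopratio=0.01)
    return best.point, len(results)
if __name__ == '__main__':
    print(_example_search())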
| {
"repo_name": "eriknw/dask-patternsearch",
"path": "dask_patternsearch/search.py",
"copies": "1",
"size": "15377",
"license": "bsd-3-clause",
"hash": 1635532030601539600,
"line_mean": 41.3608815427,
"line_max": 120,
"alpha_frac": 0.5785914027,
"autogenerated": false,
"ratio": 4.238423373759647,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5317014776459648,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import itertools
import math
import tempfile
import heapq
import inspect
import gzip
import bz2
import os
from glob import glob
from collections import Iterable, Iterator, defaultdict
from functools import wraps, partial
from toolz import (merge, frequencies, merge_with, take, curry, reduce,
join, reduceby, valmap, count, map, partition_all, filter,
pluck, groupby)
import toolz
from ..utils import tmpfile, ignoring
with ignoring(ImportError):
from cytoolz import (curry, frequencies, merge_with, join, reduceby,
count, pluck, groupby)
from pbag import PBag
from ..multiprocessing import get as mpget
from ..core import istask
from ..optimize import fuse, cull
from ..compatibility import apply, BytesIO, unicode
from ..context import _globals
names = ('bag-%d' % i for i in itertools.count(1))
load_names = ('load-%d' % i for i in itertools.count(1))
no_default = '__no__default__'
def lazify_task(task, start=True):
"""
Given a task, remove unnecessary calls to ``list``
Example
-------
>>> task = (sum, (list, (map, inc, [1, 2, 3]))) # doctest: +SKIP
>>> lazify_task(task) # doctest: +SKIP
(sum, (map, inc, [1, 2, 3]))
"""
if not istask(task):
return task
head, tail = task[0], task[1:]
if not start and head is list:
task = task[1]
return lazify_task(*tail, start=False)
else:
return (head,) + tuple([lazify_task(arg, False) for arg in tail])
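# Added example: a concrete run of lazify_task. The outermost ``list`` (if
# any) is kept, but nested ``list`` wrappers are dropped so the consuming
# reduction reads the lazy ``map``/``filter`` iterators directly. ``inc``
# here is a local stand-in for the one in the docstring above.
def _example_lazify():
    inc = lambda x: x + 1
    task = (sum, (list, (map, inc, [1, 2, 3])))
    return lazify_task(task)  # -> (sum, (map, inc, [1, 2, 3]))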
def lazify(dsk):
"""
Remove unnecessary calls to ``list`` in tasks
See Also:
``dask.bag.core.lazify_task``
"""
return valmap(lazify_task, dsk)
def optimize(dsk, keys):
""" Optimize a dask from a dask.bag """
dsk2 = cull(dsk, keys)
dsk3 = fuse(dsk2)
dsk4 = lazify(dsk3)
return dsk4
def get(dsk, keys, get=None, **kwargs):
""" Get function for dask.bag """
get = get or _globals['get'] or mpget
dsk2 = optimize(dsk, keys)
return get(dsk2, keys, **kwargs)
_get = get
def list2(seq):
""" Another list function that won't be removed by lazify """
return list(seq)
def to_textfiles(b, path, name_function=str):
""" Write bag to disk, one filename per partition, one line per element
**Paths**: This will create one file for each partition in your bag. You
can specify the filenames in a variety of ways.
Use a globstring
>>> b.to_textfiles('/path/to/data/*.json.gz') # doctest: +SKIP
    The * will be replaced by the increasing sequence 0, 1, ...
::
/path/to/data/0.json.gz
/path/to/data/1.json.gz
Use a globstring and a ``name_function=`` keyword argument. The
name_function function should expect an integer and produce a string.
>>> from datetime import date, timedelta
>>> def name(i):
... return str(date(2015, 1, 1) + i * timedelta(days=1))
>>> name(0)
'2015-01-01'
>>> name(15)
'2015-01-16'
>>> b.to_textfiles('/path/to/data/*.json.gz', name_function=name) # doctest: +SKIP
::
/path/to/data/2015-01-01.json.gz
/path/to/data/2015-01-02.json.gz
...
You can also provide an explicit list of paths.
>>> paths = ['/path/to/data/alice.json.gz', '/path/to/data/bob.json.gz', ...] # doctest: +SKIP
>>> b.to_textfiles(paths) # doctest: +SKIP
**Compression**: Filenames with extensions corresponding to known
compression algorithms (gz, bz2) will be compressed accordingly.
"""
if isinstance(path, (str, unicode)):
if '*' in path:
paths = [path.replace('*', name_function(i))
for i in range(b.npartitions)]
else:
paths = [os.path.join(path, '%s.part' % name_function(i))
for i in range(b.npartitions)]
elif isinstance(path, (tuple, list, set)):
assert len(path) == b.npartitions
paths = path
else:
raise ValueError("Path should be either\n"
"1. A list of paths -- ['foo.json', 'bar.json', ...]\n"
"2. A directory -- 'foo/\n"
"3. A path with a * in it -- 'foo.*.json'")
name = next(names)
dsk = dict(((name, i), (write, (b.name, i), path))
for i, path in enumerate(paths))
return Bag(merge(b.dask, dsk), name, b.npartitions)
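# Added sketch: how the globstring branch above expands into per-partition
# paths. Nothing is written to disk; the template and partition count are
# hypothetical.
def _example_textfile_paths(npartitions=3, name_function=str):
    """
    >>> _example_textfile_paths(2)
    ['/path/to/data/0.json.gz', '/path/to/data/1.json.gz']
    """
    template = '/path/to/data/*.json.gz'
    return [template.replace('*', name_function(i)) for i in range(npartitions)]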
class Item(object):
def __init__(self, dsk, key):
self.dask = dsk
self.key = key
def compute(self, **kwargs):
return get(self.dask, self.key, **kwargs)
def apply(self, func):
name = next(names)
dsk = {name: (func, self.key)}
return Item(merge(self.dask, dsk), name)
__int__ = __float__ = __complex__ = __bool__ = compute
class Bag(object):
""" Parallel collection of Python objects
Example
-------
Create Bag from sequence
>>> import dask.bag as db
>>> b = db.from_sequence(range(5))
>>> list(b.filter(lambda x: x % 2 == 0).map(lambda x: x * 10)) # doctest: +SKIP
[0, 20, 40]
Create Bag from filename or globstring of filenames
>>> b = db.from_filenames('/path/to/mydata.*.json.gz').map(json.loads) # doctest: +SKIP
Create manually (expert use)
>>> dsk = {('x', 0): (range, 5),
... ('x', 1): (range, 5),
... ('x', 2): (range, 5)}
>>> b = Bag(dsk, 'x', npartitions=3)
>>> sorted(b.map(lambda x: x * 10)) # doctest: +SKIP
[0, 0, 0, 10, 10, 10, 20, 20, 20, 30, 30, 30, 40, 40, 40]
>>> int(b.fold(lambda x, y: x + y)) # doctest: +SKIP
30
"""
def __init__(self, dsk, name, npartitions):
self.dask = dsk
self.name = name
self.npartitions = npartitions
self.str = StringAccessor(self)
def map(self, func):
""" Map a function across all elements in collection
>>> import dask.bag as db
>>> b = db.from_sequence(range(5))
>>> list(b.map(lambda x: x * 10)) # doctest: +SKIP
[0, 10, 20, 30, 40]
"""
name = next(names)
if takes_multiple_arguments(func):
func = curry(apply, func)
dsk = dict(((name, i), (list, (map, func, (self.name, i))))
for i in range(self.npartitions))
return Bag(merge(self.dask, dsk), name, self.npartitions)
@property
def _args(self):
return (self.dask, self.name, self.npartitions)
def filter(self, predicate):
""" Filter elements in collection by a predicate function
>>> def iseven(x):
... return x % 2 == 0
>>> import dask.bag as db
>>> b = db.from_sequence(range(5))
>>> list(b.filter(iseven)) # doctest: +SKIP
[0, 2, 4]
"""
name = next(names)
dsk = dict(((name, i), (list, (filter, predicate, (self.name, i))))
for i in range(self.npartitions))
return Bag(merge(self.dask, dsk), name, self.npartitions)
def map_partitions(self, func):
""" Apply function to every partition within collection
Note that this requires you to understand how dask.bag partitions your
data and so is somewhat internal.
>>> b.map_partitions(myfunc) # doctest: +SKIP
"""
name = next(names)
dsk = dict(((name, i), (func, (self.name, i)))
for i in range(self.npartitions))
return Bag(merge(self.dask, dsk), name, self.npartitions)
def pluck(self, key, default=no_default):
""" Select item from all tuples/dicts in collection
>>> b = from_sequence([{'name': 'Alice', 'credits': [1, 2, 3]},
... {'name': 'Bob', 'credits': [10, 20]}])
>>> list(b.pluck('name')) # doctest: +SKIP
['Alice', 'Bob']
>>> list(b.pluck('credits').pluck(0)) # doctest: +SKIP
[1, 10]
"""
name = next(names)
if isinstance(key, list):
key = (list2, key)
if default == no_default:
dsk = dict(((name, i), (list, (pluck, key, (self.name, i))))
for i in range(self.npartitions))
else:
dsk = dict(((name, i), (list, (pluck, key, (self.name, i), default)))
for i in range(self.npartitions))
return Bag(merge(self.dask, dsk), name, self.npartitions)
@classmethod
def from_sequence(cls, *args, **kwargs):
raise AttributeError("db.Bag.from_sequence is deprecated.\n"
"Use db.from_sequence instead.")
@classmethod
def from_filenames(cls, *args, **kwargs):
raise AttributeError("db.Bag.from_filenames is deprecated.\n"
"Use db.from_filenames instead.")
@wraps(to_textfiles)
def to_textfiles(self, path, name_function=str):
return to_textfiles(self, path, name_function)
def fold(self, binop, combine=None, initial=None):
""" Splittable reduction
Apply binary operator on each partition to perform reduce. Follow by a
second binary operator to combine results
>>> b = from_sequence(range(5))
>>> b.fold(lambda x, y: x + y).compute() # doctest: +SKIP
10
Optionally provide default arguments and special combine binary
operator
>>> b.fold(lambda x, y: x + y, lambda x, y: x + y, 0).compute() # doctest: +SKIP
10
"""
a = next(names)
b = next(names)
if initial:
dsk = dict(((a, i), (reduce, binop, (self.name, i), initial))
for i in range(self.npartitions))
else:
dsk = dict(((a, i), (reduce, binop, (self.name, i)))
for i in range(self.npartitions))
dsk2 = {b: (reduce, combine or binop, list(dsk.keys()))}
return Item(merge(self.dask, dsk, dsk2), b)
def frequencies(self):
""" Count number of occurrences of each distinct element
>>> b = from_sequence(['Alice', 'Bob', 'Alice'])
>>> dict(b.frequencies()) # doctest: +SKIP
        {'Alice': 2, 'Bob': 1}
"""
a = next(names)
b = next(names)
dsk = dict(((a, i), (frequencies, (self.name, i)))
for i in range(self.npartitions))
dsk2 = {(b, 0): (dictitems,
(merge_with, sum, list(sorted(dsk.keys()))))}
return Bag(merge(self.dask, dsk, dsk2), b, 1)
def topk(self, k, key=None):
""" K largest elements in collection
Optionally ordered by some key function
>>> b = from_sequence([10, 3, 5, 7, 11, 4])
>>> list(b.topk(2)) # doctest: +SKIP
[11, 10]
>>> list(b.topk(2, lambda x: -x)) # doctest: +SKIP
[3, 4]
"""
a = next(names)
b = next(names)
if key:
topk = curry(heapq.nlargest, key=key)
else:
topk = heapq.nlargest
dsk = dict(((a, i), (list, (topk, k, (self.name, i))))
for i in range(self.npartitions))
dsk2 = {(b, 0): (list, (topk, k, (toolz.concat, list(dsk.keys()))))}
return Bag(merge(self.dask, dsk, dsk2), b, 1)
def distinct(self):
""" Distinct elements of collection
Unordered without repeats.
>>> b = from_sequence(['Alice', 'Bob', 'Alice'])
>>> sorted(b.distinct())
['Alice', 'Bob']
"""
a = next(names)
dsk = dict(((a, i), (set, key)) for i, key in enumerate(self._keys()))
b = next(names)
dsk2 = {(b, 0): (apply, set.union, (list2, list(dsk.keys())))}
return Bag(merge(self.dask, dsk, dsk2), b, 1)
def reduction(self, perpartition, aggregate):
""" Reduce collection with reduction operators
Parameters
----------
perpartition: function
reduction to apply to each partition
aggregate: function
reduction to apply to the results of all partitions
Example
-------
>>> b = from_sequence(range(10))
>>> b.reduction(sum, sum).compute()
45
"""
a = next(names)
b = next(names)
dsk = dict(((a, i), (perpartition, (self.name, i)))
for i in range(self.npartitions))
dsk2 = {b: (aggregate, list(dsk.keys()))}
return Item(merge(self.dask, dsk, dsk2), b)
@wraps(sum)
def sum(self):
return self.reduction(sum, sum)
@wraps(max)
def max(self):
return self.reduction(max, max)
@wraps(min)
def min(self):
return self.reduction(min, min)
@wraps(any)
def any(self):
return self.reduction(any, any)
@wraps(all)
def all(self):
return self.reduction(all, all)
def count(self):
""" Count the number of elements """
return self.reduction(count, sum)
def mean(self):
""" Arithmetic mean """
def chunk(seq):
total, n = 0.0, 0
for x in seq:
total += x
n += 1
return total, n
def agg(x):
totals, counts = list(zip(*x))
return 1.0 * sum(totals) / sum(counts)
return self.reduction(chunk, agg)
def var(self, ddof=0):
""" Variance """
def chunk(seq):
squares, total, n = 0.0, 0.0, 0
for x in seq:
squares += x**2
total += x
n += 1
return squares, total, n
def agg(x):
squares, totals, counts = list(zip(*x))
x2, x, n = float(sum(squares)), float(sum(totals)), sum(counts)
result = (x2 / n) - (x / n)**2
return result * n / (n - ddof)
return self.reduction(chunk, agg)
def std(self, ddof=0):
""" Standard deviation """
return self.var(ddof=ddof).apply(math.sqrt)
def join(self, other, on_self, on_other=None):
""" Join collection with another collection
Other collection must be an Iterable, and not a Bag.
>>> people = from_sequence(['Alice', 'Bob', 'Charlie'])
>>> fruit = ['Apple', 'Apricot', 'Banana']
>>> list(people.join(fruit, lambda x: x[0])) # doctest: +SKIP
[('Apple', 'Alice'), ('Apricot', 'Alice'), ('Banana', 'Bob')]
"""
assert isinstance(other, Iterable)
assert not isinstance(other, Bag)
if on_other is None:
on_other = on_self
name = next(names)
dsk = dict(((name, i), (list, (join, on_other, other,
on_self, (self.name, i))))
for i in range(self.npartitions))
return Bag(merge(self.dask, dsk), name, self.npartitions)
def product(self, other):
""" Cartesian product between two bags """
assert isinstance(other, Bag)
name = next(names)
n, m = self.npartitions, other.npartitions
dsk = dict(((name, i*m + j),
(list, (itertools.product, (self.name, i),
(other.name, j))))
for i in range(n) for j in range(m))
return Bag(merge(self.dask, other.dask, dsk), name, n*m)
def foldby(self, key, binop, initial=no_default, combine=None,
combine_initial=no_default):
""" Combined reduction and groupby
Foldby provides a combined groupby and reduce for efficient parallel
split-apply-combine tasks.
The computation
>>> b.reduceby(key, binop, init) # doctest: +SKIP
is equivalent to the following:
>>> def reduction(group): # doctest: +SKIP
... return reduce(binop, group, init) # doctest: +SKIP
>>> b.groupby(key).map(lambda (k, v): (k, reduction(v)))# doctest: +SKIP
But uses minimal communication and so is *much* faster.
>>> b = from_sequence(range(10))
>>> iseven = lambda x: x % 2 == 0
>>> add = lambda x, y: x + y
>>> dict(b.foldby(iseven, add)) # doctest: +SKIP
{True: 20, False: 25}
See also
--------
toolz.reduceby
pyspark.combineByKey
"""
a = next(names)
b = next(names)
if combine is None:
combine = binop
if initial is not no_default:
dsk = dict(((a, i),
(reduceby, key, binop, (self.name, i), initial))
for i in range(self.npartitions))
else:
dsk = dict(((a, i),
(reduceby, key, binop, (self.name, i)))
for i in range(self.npartitions))
combine2 = lambda acc, x: combine(acc, x[1])
if combine_initial is not no_default:
dsk2 = {(b, 0): (dictitems,
(reduceby,
0, combine2,
(toolz.concat, (map, dictitems, list(dsk.keys()))),
combine_initial))}
else:
dsk2 = {(b, 0): (dictitems,
(merge_with,
(curry, reduce, combine),
list(dsk.keys())))}
return Bag(merge(self.dask, dsk, dsk2), b, 1)
def take(self, k, compute=True):
""" Take the first k elements
Evaluates by default, use ``compute=False`` to avoid computation.
Only takes from the first partition
>>> b = from_sequence(range(10))
>>> b.take(3) # doctest: +SKIP
(0, 1, 2)
"""
name = next(names)
dsk = {(name, 0): (list, (take, k, (self.name, 0)))}
b = Bag(merge(self.dask, dsk), name, 1)
if compute:
return tuple(b.compute())
else:
return b
def _keys(self):
return [(self.name, i) for i in range(self.npartitions)]
def compute(self, **kwargs):
""" Force evaluation of bag """
results = get(self.dask, self._keys(), **kwargs)
if isinstance(results[0], Iterable):
results = toolz.concat(results)
if not isinstance(results, Iterator):
results = iter(results)
return results
def concat(self):
""" Concatenate nested lists into one long list
>>> b = from_sequence([[1], [2, 3]])
>>> list(b)
[[1], [2, 3]]
>>> list(b.concat())
[1, 2, 3]
"""
name = next(names)
dsk = dict(((name, i), (list, (toolz.concat, (self.name, i))))
for i in range(self.npartitions))
return Bag(merge(self.dask, dsk), name, self.npartitions)
__iter__ = compute
def groupby(self, grouper, npartitions=None):
""" Group collection by key function
Note that this requires full dataset read, serialization and shuffle.
This is expensive. If possible you should use ``foldby``.
>>> b = from_sequence(range(10))
>>> dict(b.groupby(lambda x: x % 2 == 0)) # doctest: +SKIP
{True: [0, 2, 4, 6, 8], False: [1, 3, 5, 7, 9]}
See Also
--------
Bag.foldby
pbag
"""
if npartitions is None:
npartitions = self.npartitions
paths = [tempfile.mkdtemp('%d.pbag' % i) for i in range(npartitions)]
# Partition data on disk
name = next(names)
dsk1 = dict(((name, i),
(partition, grouper, (self.name, i), npartitions,
paths[i % len(paths)]))
for i in range(self.npartitions))
# Collect groups
name = next(names)
dsk2 = dict(((name, i),
(collect, grouper, npartitions, i, sorted(dsk1.keys())))
for i in range(npartitions))
return Bag(merge(self.dask, dsk1, dsk2), name, npartitions)
def to_dataframe(self, columns=None):
""" Convert Bag to dask.dataframe
Bag should contain tuple or dict records.
Provide ``columns=`` keyword arg to specify column names.
Index will not be particularly meaningful. Use ``reindex`` afterwards
if necessary.
Example
-------
>>> import dask.bag as db
>>> b = db.from_sequence([{'name': 'Alice', 'balance': 100},
... {'name': 'Bob', 'balance': 200},
... {'name': 'Charlie', 'balance': 300}],
... npartitions=2)
>>> df = b.to_dataframe()
>>> df.compute()
balance name
0 100 Alice
1 200 Bob
0 300 Charlie
"""
import pandas as pd
import dask.dataframe as dd
if columns is None:
head = self.take(1)[0]
if isinstance(head, dict):
columns = sorted(head)
elif isinstance(head, (tuple, list)):
columns = list(range(len(head)))
name = next(names)
DataFrame = curry(pd.DataFrame, columns=columns)
dsk = dict(((name, i), (DataFrame, (list2, (self.name, i))))
for i in range(self.npartitions))
divisions = [None] * (self.npartitions - 1)
return dd.DataFrame(merge(optimize(self.dask, self._keys()), dsk),
name, columns, divisions)
def partition(grouper, sequence, npartitions, path):
""" Partition a bag along a grouper, store partitions on disk """
with PBag(grouper, npartitions, path) as pb:
pb.extend(sequence)
return pb
def collect(grouper, npartitions, group, pbags):
""" Collect partitions from disk and yield k,v group pairs """
from pbag import PBag
pbags = list(take(npartitions, pbags))
result = defaultdict(list)
for pb in pbags:
part = pb.get_partition(group)
groups = groupby(grouper, part)
for k, v in groups.items():
result[k].extend(v)
return list(result.items())
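# Added sketch: an in-memory analogue of the partition/collect shuffle that
# Bag.groupby builds above. Plain dicts stand in for the on-disk PBag
# partitions (the real routing is up to PBag); this is purely illustrative.
def _example_shuffle(grouper, partitions, npartitions):
    # "partition" step: route each element to a bucket chosen by its group key
    buckets = [defaultdict(list) for _ in range(npartitions)]
    for part in partitions:
        for x in part:
            key = grouper(x)
            buckets[hash(key) % npartitions][key].append(x)
    # "collect" step: every element sharing a key landed in the same bucket,
    # so each bucket already holds complete (key, values) groups
    return [list(bucket.items()) for bucket in buckets]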
opens = {'gz': gzip.open, 'bz2': bz2.BZ2File}
def from_filenames(filenames):
""" Create dask by loading in lines from many files
Provide list of filenames
>>> b = from_filenames(['myfile.1.txt', 'myfile.2.txt']) # doctest: +SKIP
Or a globstring
>>> b = from_filenames('myfiles.*.txt') # doctest: +SKIP
See also:
from_sequence: A more generic bag creation function
"""
if isinstance(filenames, str):
filenames = sorted(glob(filenames))
if not filenames:
raise ValueError("No filenames found")
full_filenames = [os.path.abspath(f) for f in filenames]
extension = os.path.splitext(filenames[0])[1].strip('.')
myopen = opens.get(extension, open)
d = dict((('load', i), (list, (myopen, fn)))
for i, fn in enumerate(full_filenames))
return Bag(d, 'load', len(d))
def write(data, filename):
dirname = os.path.dirname(filename)
if not os.path.exists(dirname):
with ignoring(OSError):
os.makedirs(dirname)
ext = os.path.splitext(filename)[1][1:]
if ext == 'gz':
f = gzip.open(filename, 'wb')
data = (line.encode() for line in data)
elif ext == 'bz2':
f = bz2.BZ2File(filename, 'wb')
data = (line.encode() for line in data)
else:
f = open(filename, 'w')
try:
for item in data:
f.write(item)
finally:
f.close()
def from_hdfs(path, hdfs=None, host='localhost', port='50070', user_name=None):
""" Create dask by loading in files from HDFS
Provide an hdfs directory and credentials
>>> b = from_hdfs('home/username/data/', host='localhost', user_name='ubuntu') # doctest: +SKIP
Alternatively provide an instance of ``pywebhdfs.webhdfs.PyWebHdfsClient``
>>> from pywebhdfs.webhdfs import PyWebHdfsClient # doctest: +SKIP
>>> hdfs = PyWebHdfsClient(host='hostname', user_name='username') # doctest: +SKIP
>>> b = from_hdfs('home/username/data/', hdfs=hdfs) # doctest: +SKIP
"""
from .. import hdfs_utils
filenames = hdfs_utils.filenames(hdfs, path)
if not filenames:
raise ValueError("No files found for path %s" % path)
name = next(names)
dsk = dict()
for i, fn in enumerate(filenames):
ext = fn.split('.')[-1]
if ext in ('gz', 'bz2'):
dsk[(name, i)] = (stream_decompress, ext, (hdfs.read_file, fn))
else:
dsk[(name, i)] = (hdfs.read_file, fn)
return Bag(dsk, name, len(dsk))
def stream_decompress(fmt, data):
""" Decompress a block of compressed bytes into a stream of strings """
if fmt == 'gz':
return gzip.GzipFile(fileobj=BytesIO(data))
if fmt == 'bz2':
return bz2_stream(data)
else:
return map(bytes.decode, BytesIO(data))
def bz2_stream(compressed, chunksize=100000):
""" Stream lines from a chunk of compressed bz2 data """
decompressor = bz2.BZ2Decompressor()
for i in range(0, len(compressed), chunksize):
chunk = compressed[i: i+chunksize]
decompressed = decompressor.decompress(chunk).decode()
for line in decompressed.split('\n'):
yield line
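# Added example: round-tripping a small payload through bz2_stream. The
# sample text is arbitrary; it just shows lines being recovered from the
# chunked decompressor.
def _example_bz2_stream():
    """
    >>> _example_bz2_stream()
    ['alice', 'bob', 'charlie']
    """
    payload = bz2.compress('alice\nbob\ncharlie'.encode())
    return [line for line in bz2_stream(payload) if line]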
def from_sequence(seq, partition_size=None, npartitions=None):
""" Create dask from Python sequence
This sequence should be relatively small in memory. Dask Bag works
best when it handles loading your data itself. Commonly we load a
sequence of filenames into a Bag and then use ``.map`` to open them.
Parameters
----------
seq: Iterable
A sequence of elements to put into the dask
partition_size: int (optional)
The length of each partition
npartitions: int (optional)
The number of desired partitions
It is best to provide either ``partition_size`` or ``npartitions``
(though not both.)
Example
-------
>>> b = from_sequence(['Alice', 'Bob', 'Chuck'], partition_size=2)
See also:
from_filenames: Specialized bag creation function for textfiles
"""
seq = list(seq)
if npartitions and not partition_size:
partition_size = int(math.ceil(len(seq) / npartitions))
if npartitions is None and partition_size is None:
if len(seq) < 100:
partition_size = 1
else:
partition_size = int(len(seq) / 100)
parts = list(partition_all(partition_size, seq))
name = next(load_names)
d = dict(((name, i), part) for i, part in enumerate(parts))
return Bag(d, name, len(d))
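# Added sketch: the partition sizing rule used by from_sequence above. When
# npartitions is given, the partition size is ceil(len(seq) / npartitions);
# the element count and partition count here are arbitrary.
def _example_partition_sizes(n_elements=10, npartitions=3):
    """
    >>> _example_partition_sizes(10, 3)
    (4, 3)
    """
    partition_size = int(math.ceil(n_elements / npartitions))
    parts = list(partition_all(partition_size, range(n_elements)))
    return partition_size, len(parts)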
def dictitems(d):
""" A pickleable version of dict.items
>>> dictitems({'x': 1})
[('x', 1)]
"""
return list(d.items())
def takes_multiple_arguments(func):
"""
>>> def f(x, y): pass
>>> takes_multiple_arguments(f)
True
>>> def f(x): pass
>>> takes_multiple_arguments(f)
False
>>> def f(x, y=None): pass
>>> takes_multiple_arguments(f)
False
>>> def f(*args): pass
>>> takes_multiple_arguments(f)
True
>>> takes_multiple_arguments(map) # default to False
False
"""
try:
spec = inspect.getargspec(func)
except:
return False
if spec.varargs:
return True
if spec.defaults is None:
return len(spec.args) != 1
return len(spec.args) - len(spec.defaults) > 1
def concat(bags):
""" Concatenate many bags together, unioning all elements
>>> import dask.bag as db
>>> a = db.from_sequence([1, 2, 3])
>>> b = db.from_sequence([4, 5, 6])
>>> c = db.concat([a, b])
>>> list(c)
[1, 2, 3, 4, 5, 6]
"""
name = next(names)
counter = itertools.count(0)
dsk = dict(((name, next(counter)), key) for bag in bags
for key in sorted(bag.dask))
return Bag(merge(dsk, *[b.dask for b in bags]), name, len(dsk))
class StringAccessor(object):
""" String processing functions
Examples
--------
>>> import dask.bag as db
>>> b = db.from_sequence(['Alice Smith', 'Bob Jones', 'Charlie Smith'])
>>> list(b.str.lower())
['alice smith', 'bob jones', 'charlie smith']
>>> list(b.str.match('*Smith'))
['Alice Smith', 'Charlie Smith']
>>> list(b.str.split(' '))
[['Alice', 'Smith'], ['Bob', 'Jones'], ['Charlie', 'Smith']]
"""
def __init__(self, bag):
self._bag = bag
def __dir__(self):
return sorted(set(dir(type(self)) + dir(str)))
def _strmap(self, func, *args, **kwargs):
return self._bag.map(lambda s: func(s, *args, **kwargs))
def __getattr__(self, key):
try:
return object.__getattribute__(self, key)
except AttributeError:
if key in dir(str):
func = getattr(str, key)
return robust_wraps(func)(partial(self._strmap, func))
else:
raise
def match(self, pattern):
""" Filter strings by those that match a pattern
Example
-------
>>> import dask.bag as db
>>> b = db.from_sequence(['Alice Smith', 'Bob Jones', 'Charlie Smith'])
>>> list(b.str.match('*Smith'))
['Alice Smith', 'Charlie Smith']
See Also
--------
fnmatch.fnmatch
"""
from fnmatch import fnmatch
return self._bag.filter(partial(fnmatch, pat=pattern))
def robust_wraps(wrapper):
""" A weak version of wraps that only copies doc """
def _(wrapped):
wrapped.__doc__ = wrapper.__doc__
return wrapped
return _
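# Added sketch: the split-apply-combine shape behind Bag.foldby, written out
# with toolz.reduceby on two in-memory "partitions". Each partition is
# reduced independently and the per-partition dictionaries are then merged
# with the combine operator -- the same structure as the graph foldby
# builds. The data mirrors the iseven/add example in the foldby docstring.
def _example_foldby_by_hand():
    """
    >>> sorted(_example_foldby_by_hand().items())
    [(False, 25), (True, 20)]
    """
    iseven = lambda x: x % 2 == 0
    add = lambda x, y: x + y
    partitions = [range(0, 5), range(5, 10)]
    per_partition = [reduceby(iseven, add, part) for part in partitions]
    return merge_with(lambda vals: reduce(add, vals), per_partition)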
| {
"repo_name": "esc/dask",
"path": "dask/bag/core.py",
"copies": "1",
"size": "29804",
"license": "bsd-3-clause",
"hash": -2592661406064875000,
"line_mean": 29.7574819401,
"line_max": 100,
"alpha_frac": 0.5401623943,
"autogenerated": false,
"ratio": 3.6817788758492895,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9716830343132004,
"avg_score": 0.0010221854034571064,
"num_lines": 969
} |
from __future__ import absolute_import, division, print_function
import itertools
import math
import tempfile
import inspect
import gzip
import bz2
import os
try:
from urllib2 import urlopen
except ImportError:
from urllib.request import urlopen
from glob import glob
from collections import Iterable, Iterator, defaultdict
from functools import wraps, partial
from toolz import (merge, frequencies, merge_with, take, curry, reduce,
join, reduceby, valmap, count, map, partition_all, filter,
pluck, groupby, topk)
import toolz
from ..utils import tmpfile, ignoring
with ignoring(ImportError):
from cytoolz import (curry, frequencies, merge_with, join, reduceby,
count, pluck, groupby, topk)
from pbag import PBag
from ..multiprocessing import get as mpget
from ..core import istask
from ..optimize import fuse, cull
from ..compatibility import apply, BytesIO, unicode
from ..context import _globals
names = ('bag-%d' % i for i in itertools.count(1))
load_names = ('load-%d' % i for i in itertools.count(1))
no_default = '__no__default__'
def lazify_task(task, start=True):
"""
Given a task, remove unnecessary calls to ``list``
Example
-------
>>> task = (sum, (list, (map, inc, [1, 2, 3]))) # doctest: +SKIP
>>> lazify_task(task) # doctest: +SKIP
(sum, (map, inc, [1, 2, 3]))
"""
if not istask(task):
return task
head, tail = task[0], task[1:]
if not start and head in (list, reify):
task = task[1]
return lazify_task(*tail, start=False)
else:
return (head,) + tuple([lazify_task(arg, False) for arg in tail])
def lazify(dsk):
"""
Remove unnecessary calls to ``list`` in tasks
See Also:
``dask.bag.core.lazify_task``
"""
return valmap(lazify_task, dsk)
def optimize(dsk, keys):
""" Optimize a dask from a dask.bag """
dsk2 = cull(dsk, keys)
dsk3 = fuse(dsk2)
dsk4 = lazify(dsk3)
return dsk4
def get(dsk, keys, get=None, **kwargs):
""" Get function for dask.bag """
get = get or _globals['get'] or mpget
dsk2 = optimize(dsk, keys)
return get(dsk2, keys, **kwargs)
_get = get
def list2(seq):
""" Another list function that won't be removed by lazify """
return list(seq)
def to_textfiles(b, path, name_function=str):
""" Write bag to disk, one filename per partition, one line per element
**Paths**: This will create one file for each partition in your bag. You
can specify the filenames in a variety of ways.
Use a globstring
>>> b.to_textfiles('/path/to/data/*.json.gz') # doctest: +SKIP
    The * will be replaced by the increasing sequence 0, 1, ...
::
/path/to/data/0.json.gz
/path/to/data/1.json.gz
Use a globstring and a ``name_function=`` keyword argument. The
name_function function should expect an integer and produce a string.
>>> from datetime import date, timedelta
>>> def name(i):
... return str(date(2015, 1, 1) + i * timedelta(days=1))
>>> name(0)
'2015-01-01'
>>> name(15)
'2015-01-16'
>>> b.to_textfiles('/path/to/data/*.json.gz', name_function=name) # doctest: +SKIP
::
/path/to/data/2015-01-01.json.gz
/path/to/data/2015-01-02.json.gz
...
You can also provide an explicit list of paths.
>>> paths = ['/path/to/data/alice.json.gz', '/path/to/data/bob.json.gz', ...] # doctest: +SKIP
>>> b.to_textfiles(paths) # doctest: +SKIP
**Compression**: Filenames with extensions corresponding to known
compression algorithms (gz, bz2) will be compressed accordingly.
"""
if isinstance(path, (str, unicode)):
if '*' in path:
paths = [path.replace('*', name_function(i))
for i in range(b.npartitions)]
else:
paths = [os.path.join(path, '%s.part' % name_function(i))
for i in range(b.npartitions)]
elif isinstance(path, (tuple, list, set)):
assert len(path) == b.npartitions
paths = path
else:
raise ValueError("Path should be either\n"
"1. A list of paths -- ['foo.json', 'bar.json', ...]\n"
"2. A directory -- 'foo/\n"
"3. A path with a * in it -- 'foo.*.json'")
name = next(names)
dsk = dict(((name, i), (write, (b.name, i), path))
for i, path in enumerate(paths))
return Bag(merge(b.dask, dsk), name, b.npartitions)
class Item(object):
def __init__(self, dsk, key):
self.dask = dsk
self.key = key
def compute(self, **kwargs):
return get(self.dask, self.key, **kwargs)
def apply(self, func):
name = next(names)
dsk = {name: (func, self.key)}
return Item(merge(self.dask, dsk), name)
__int__ = __float__ = __complex__ = __bool__ = compute
class Bag(object):
""" Parallel collection of Python objects
Example
-------
Create Bag from sequence
>>> import dask.bag as db
>>> b = db.from_sequence(range(5))
>>> list(b.filter(lambda x: x % 2 == 0).map(lambda x: x * 10)) # doctest: +SKIP
[0, 20, 40]
Create Bag from filename or globstring of filenames
>>> b = db.from_filenames('/path/to/mydata.*.json.gz').map(json.loads) # doctest: +SKIP
Create manually (expert use)
>>> dsk = {('x', 0): (range, 5),
... ('x', 1): (range, 5),
... ('x', 2): (range, 5)}
>>> b = Bag(dsk, 'x', npartitions=3)
>>> sorted(b.map(lambda x: x * 10)) # doctest: +SKIP
[0, 0, 0, 10, 10, 10, 20, 20, 20, 30, 30, 30, 40, 40, 40]
>>> int(b.fold(lambda x, y: x + y)) # doctest: +SKIP
30
"""
def __init__(self, dsk, name, npartitions):
self.dask = dsk
self.name = name
self.npartitions = npartitions
self.str = StringAccessor(self)
def map(self, func):
""" Map a function across all elements in collection
>>> import dask.bag as db
>>> b = db.from_sequence(range(5))
>>> list(b.map(lambda x: x * 10)) # doctest: +SKIP
[0, 10, 20, 30, 40]
"""
name = next(names)
if takes_multiple_arguments(func):
func = curry(apply, func)
dsk = dict(((name, i), (reify, (map, func, (self.name, i))))
for i in range(self.npartitions))
return Bag(merge(self.dask, dsk), name, self.npartitions)
@property
def _args(self):
return (self.dask, self.name, self.npartitions)
def filter(self, predicate):
""" Filter elements in collection by a predicate function
>>> def iseven(x):
... return x % 2 == 0
>>> import dask.bag as db
>>> b = db.from_sequence(range(5))
>>> list(b.filter(iseven)) # doctest: +SKIP
[0, 2, 4]
"""
name = next(names)
dsk = dict(((name, i), (reify, (filter, predicate, (self.name, i))))
for i in range(self.npartitions))
return Bag(merge(self.dask, dsk), name, self.npartitions)
def map_partitions(self, func):
""" Apply function to every partition within collection
Note that this requires you to understand how dask.bag partitions your
data and so is somewhat internal.
>>> b.map_partitions(myfunc) # doctest: +SKIP
"""
name = next(names)
dsk = dict(((name, i), (func, (self.name, i)))
for i in range(self.npartitions))
return Bag(merge(self.dask, dsk), name, self.npartitions)
def pluck(self, key, default=no_default):
""" Select item from all tuples/dicts in collection
>>> b = from_sequence([{'name': 'Alice', 'credits': [1, 2, 3]},
... {'name': 'Bob', 'credits': [10, 20]}])
>>> list(b.pluck('name')) # doctest: +SKIP
['Alice', 'Bob']
>>> list(b.pluck('credits').pluck(0)) # doctest: +SKIP
[1, 10]
"""
name = next(names)
if isinstance(key, list):
key = (list2, key)
if default == no_default:
dsk = dict(((name, i), (list, (pluck, key, (self.name, i))))
for i in range(self.npartitions))
else:
dsk = dict(((name, i), (list, (pluck, key, (self.name, i), default)))
for i in range(self.npartitions))
return Bag(merge(self.dask, dsk), name, self.npartitions)
@classmethod
def from_sequence(cls, *args, **kwargs):
raise AttributeError("db.Bag.from_sequence is deprecated.\n"
"Use db.from_sequence instead.")
@classmethod
def from_filenames(cls, *args, **kwargs):
raise AttributeError("db.Bag.from_filenames is deprecated.\n"
"Use db.from_filenames instead.")
@wraps(to_textfiles)
def to_textfiles(self, path, name_function=str):
return to_textfiles(self, path, name_function)
def fold(self, binop, combine=None, initial=None):
""" Splittable reduction
Apply binary operator on each partition to perform reduce. Follow by a
second binary operator to combine results
>>> b = from_sequence(range(5))
>>> b.fold(lambda x, y: x + y).compute() # doctest: +SKIP
10
Optionally provide default arguments and special combine binary
operator
>>> b.fold(lambda x, y: x + y, lambda x, y: x + y, 0).compute() # doctest: +SKIP
10
"""
a = next(names)
b = next(names)
if initial:
dsk = dict(((a, i), (reduce, binop, (self.name, i), initial))
for i in range(self.npartitions))
else:
dsk = dict(((a, i), (reduce, binop, (self.name, i)))
for i in range(self.npartitions))
dsk2 = {b: (reduce, combine or binop, list(dsk.keys()))}
return Item(merge(self.dask, dsk, dsk2), b)
def frequencies(self):
""" Count number of occurrences of each distinct element
>>> b = from_sequence(['Alice', 'Bob', 'Alice'])
>>> dict(b.frequencies()) # doctest: +SKIP
        {'Alice': 2, 'Bob': 1}
"""
a = next(names)
b = next(names)
dsk = dict(((a, i), (frequencies, (self.name, i)))
for i in range(self.npartitions))
dsk2 = {(b, 0): (dictitems,
(merge_with, sum, list(sorted(dsk.keys()))))}
return Bag(merge(self.dask, dsk, dsk2), b, 1)
def topk(self, k, key=None):
""" K largest elements in collection
Optionally ordered by some key function
>>> b = from_sequence([10, 3, 5, 7, 11, 4])
>>> list(b.topk(2)) # doctest: +SKIP
[11, 10]
>>> list(b.topk(2, lambda x: -x)) # doctest: +SKIP
[3, 4]
"""
a = next(names)
b = next(names)
if key:
func = curry(topk, key=key)
else:
func = topk
dsk = dict(((a, i), (list, (func, k, (self.name, i))))
for i in range(self.npartitions))
dsk2 = {(b, 0): (list, (func, k, (toolz.concat, list(dsk.keys()))))}
return Bag(merge(self.dask, dsk, dsk2), b, 1)
def distinct(self):
""" Distinct elements of collection
Unordered without repeats.
>>> b = from_sequence(['Alice', 'Bob', 'Alice'])
>>> sorted(b.distinct())
['Alice', 'Bob']
"""
a = next(names)
dsk = dict(((a, i), (set, key)) for i, key in enumerate(self._keys()))
b = next(names)
dsk2 = {(b, 0): (apply, set.union, (list2, list(dsk.keys())))}
return Bag(merge(self.dask, dsk, dsk2), b, 1)
def reduction(self, perpartition, aggregate):
""" Reduce collection with reduction operators
Parameters
----------
perpartition: function
reduction to apply to each partition
aggregate: function
reduction to apply to the results of all partitions
Example
-------
>>> b = from_sequence(range(10))
>>> b.reduction(sum, sum).compute()
45
"""
a = next(names)
b = next(names)
dsk = dict(((a, i), (perpartition, (self.name, i)))
for i in range(self.npartitions))
dsk2 = {b: (aggregate, list(dsk.keys()))}
return Item(merge(self.dask, dsk, dsk2), b)
@wraps(sum)
def sum(self):
return self.reduction(sum, sum)
@wraps(max)
def max(self):
return self.reduction(max, max)
@wraps(min)
def min(self):
return self.reduction(min, min)
@wraps(any)
def any(self):
return self.reduction(any, any)
@wraps(all)
def all(self):
return self.reduction(all, all)
def count(self):
""" Count the number of elements """
return self.reduction(count, sum)
def mean(self):
""" Arithmetic mean """
def chunk(seq):
total, n = 0.0, 0
for x in seq:
total += x
n += 1
return total, n
def agg(x):
totals, counts = list(zip(*x))
return 1.0 * sum(totals) / sum(counts)
return self.reduction(chunk, agg)
def var(self, ddof=0):
""" Variance """
def chunk(seq):
squares, total, n = 0.0, 0.0, 0
for x in seq:
squares += x**2
total += x
n += 1
return squares, total, n
def agg(x):
squares, totals, counts = list(zip(*x))
x2, x, n = float(sum(squares)), float(sum(totals)), sum(counts)
result = (x2 / n) - (x / n)**2
return result * n / (n - ddof)
return self.reduction(chunk, agg)
def std(self, ddof=0):
""" Standard deviation """
return self.var(ddof=ddof).apply(math.sqrt)
def join(self, other, on_self, on_other=None):
""" Join collection with another collection
Other collection must be an Iterable, and not a Bag.
>>> people = from_sequence(['Alice', 'Bob', 'Charlie'])
>>> fruit = ['Apple', 'Apricot', 'Banana']
>>> list(people.join(fruit, lambda x: x[0])) # doctest: +SKIP
[('Apple', 'Alice'), ('Apricot', 'Alice'), ('Banana', 'Bob')]
"""
assert isinstance(other, Iterable)
assert not isinstance(other, Bag)
if on_other is None:
on_other = on_self
name = next(names)
dsk = dict(((name, i), (list, (join, on_other, other,
on_self, (self.name, i))))
for i in range(self.npartitions))
return Bag(merge(self.dask, dsk), name, self.npartitions)
def product(self, other):
""" Cartesian product between two bags """
assert isinstance(other, Bag)
name = next(names)
n, m = self.npartitions, other.npartitions
dsk = dict(((name, i*m + j),
(list, (itertools.product, (self.name, i),
(other.name, j))))
for i in range(n) for j in range(m))
return Bag(merge(self.dask, other.dask, dsk), name, n*m)
def foldby(self, key, binop, initial=no_default, combine=None,
combine_initial=no_default):
""" Combined reduction and groupby
Foldby provides a combined groupby and reduce for efficient parallel
split-apply-combine tasks.
The computation
>>> b.reduceby(key, binop, init) # doctest: +SKIP
is equivalent to the following:
>>> def reduction(group): # doctest: +SKIP
... return reduce(binop, group, init) # doctest: +SKIP
>>> b.groupby(key).map(lambda (k, v): (k, reduction(v)))# doctest: +SKIP
But uses minimal communication and so is *much* faster.
>>> b = from_sequence(range(10))
>>> iseven = lambda x: x % 2 == 0
>>> add = lambda x, y: x + y
>>> dict(b.foldby(iseven, add)) # doctest: +SKIP
{True: 20, False: 25}
See also
--------
toolz.reduceby
pyspark.combineByKey
"""
a = next(names)
b = next(names)
if combine is None:
combine = binop
if initial is not no_default:
dsk = dict(((a, i),
(reduceby, key, binop, (self.name, i), initial))
for i in range(self.npartitions))
else:
dsk = dict(((a, i),
(reduceby, key, binop, (self.name, i)))
for i in range(self.npartitions))
combine2 = lambda acc, x: combine(acc, x[1])
if combine_initial is not no_default:
dsk2 = {(b, 0): (dictitems,
(reduceby,
0, combine2,
(toolz.concat, (map, dictitems, list(dsk.keys()))),
combine_initial))}
else:
dsk2 = {(b, 0): (dictitems,
(merge_with,
(curry, reduce, combine),
list(dsk.keys())))}
return Bag(merge(self.dask, dsk, dsk2), b, 1)
def take(self, k, compute=True):
""" Take the first k elements
Evaluates by default, use ``compute=False`` to avoid computation.
Only takes from the first partition
>>> b = from_sequence(range(10))
>>> b.take(3) # doctest: +SKIP
(0, 1, 2)
"""
name = next(names)
dsk = {(name, 0): (list, (take, k, (self.name, 0)))}
b = Bag(merge(self.dask, dsk), name, 1)
if compute:
return tuple(b.compute())
else:
return b
def _keys(self):
return [(self.name, i) for i in range(self.npartitions)]
def compute(self, **kwargs):
""" Force evaluation of bag """
results = get(self.dask, self._keys(), **kwargs)
if isinstance(results[0], Iterable):
results = toolz.concat(results)
if isinstance(results, Iterator):
results = list(results)
return results
def concat(self):
""" Concatenate nested lists into one long list
>>> b = from_sequence([[1], [2, 3]])
>>> list(b)
[[1], [2, 3]]
>>> list(b.concat())
[1, 2, 3]
"""
name = next(names)
dsk = dict(((name, i), (list, (toolz.concat, (self.name, i))))
for i in range(self.npartitions))
return Bag(merge(self.dask, dsk), name, self.npartitions)
def __iter__(self):
return iter(self.compute())
def groupby(self, grouper, npartitions=None):
""" Group collection by key function
Note that this requires full dataset read, serialization and shuffle.
This is expensive. If possible you should use ``foldby``.
>>> b = from_sequence(range(10))
>>> dict(b.groupby(lambda x: x % 2 == 0)) # doctest: +SKIP
{True: [0, 2, 4, 6, 8], False: [1, 3, 5, 7, 9]}
See Also
--------
Bag.foldby
pbag
"""
if npartitions is None:
npartitions = self.npartitions
paths = [tempfile.mkdtemp('%d.pbag' % i) for i in range(npartitions)]
# Partition data on disk
name = next(names)
dsk1 = dict(((name, i),
(partition, grouper, (self.name, i), npartitions,
paths[i % len(paths)]))
for i in range(self.npartitions))
# Collect groups
name = next(names)
dsk2 = dict(((name, i),
(collect, grouper, npartitions, i, sorted(dsk1.keys())))
for i in range(npartitions))
return Bag(merge(self.dask, dsk1, dsk2), name, npartitions)
def to_dataframe(self, columns=None):
""" Convert Bag to dask.dataframe
Bag should contain tuple or dict records.
Provide ``columns=`` keyword arg to specify column names.
Index will not be particularly meaningful. Use ``reindex`` afterwards
if necessary.
Example
-------
>>> import dask.bag as db
>>> b = db.from_sequence([{'name': 'Alice', 'balance': 100},
... {'name': 'Bob', 'balance': 200},
... {'name': 'Charlie', 'balance': 300}],
... npartitions=2)
>>> df = b.to_dataframe()
>>> df.compute()
balance name
0 100 Alice
1 200 Bob
0 300 Charlie
"""
import pandas as pd
import dask.dataframe as dd
if columns is None:
head = self.take(1)[0]
if isinstance(head, dict):
columns = sorted(head)
elif isinstance(head, (tuple, list)):
columns = list(range(len(head)))
name = next(names)
DataFrame = curry(pd.DataFrame, columns=columns)
dsk = dict(((name, i), (DataFrame, (list2, (self.name, i))))
for i in range(self.npartitions))
divisions = [None] * (self.npartitions - 1)
return dd.DataFrame(merge(optimize(self.dask, self._keys()), dsk),
name, columns, divisions)
def partition(grouper, sequence, npartitions, path):
""" Partition a bag along a grouper, store partitions on disk """
with PBag(grouper, npartitions, path) as pb:
pb.extend(sequence)
return pb
def collect(grouper, npartitions, group, pbags):
""" Collect partitions from disk and yield k,v group pairs """
from pbag import PBag
pbags = list(take(npartitions, pbags))
result = defaultdict(list)
for pb in pbags:
part = pb.get_partition(group)
groups = groupby(grouper, part)
for k, v in groups.items():
result[k].extend(v)
return list(result.items())
opens = {'gz': gzip.open, 'bz2': bz2.BZ2File}
def from_filenames(filenames):
""" Create dask by loading in lines from many files
Provide list of filenames
>>> b = from_filenames(['myfile.1.txt', 'myfile.2.txt']) # doctest: +SKIP
Or a globstring
>>> b = from_filenames('myfiles.*.txt') # doctest: +SKIP
See also:
from_sequence: A more generic bag creation function
"""
if isinstance(filenames, str):
filenames = sorted(glob(filenames))
if not filenames:
raise ValueError("No filenames found")
full_filenames = [os.path.abspath(f) for f in filenames]
extension = os.path.splitext(filenames[0])[1].strip('.')
myopen = opens.get(extension, open)
d = dict((('load', i), (list, (myopen, fn)))
for i, fn in enumerate(full_filenames))
return Bag(d, 'load', len(d))
def write(data, filename):
dirname = os.path.dirname(filename)
if not os.path.exists(dirname):
with ignoring(OSError):
os.makedirs(dirname)
ext = os.path.splitext(filename)[1][1:]
if ext == 'gz':
f = gzip.open(filename, 'wb')
data = (line.encode() for line in data)
elif ext == 'bz2':
f = bz2.BZ2File(filename, 'wb')
data = (line.encode() for line in data)
else:
f = open(filename, 'w')
try:
for item in data:
f.write(item)
finally:
f.close()
def from_hdfs(path, hdfs=None, host='localhost', port='50070', user_name=None):
""" Create dask by loading in files from HDFS
Provide an hdfs directory and credentials
>>> b = from_hdfs('home/username/data/', host='localhost', user_name='ubuntu') # doctest: +SKIP
Alternatively provide an instance of ``pywebhdfs.webhdfs.PyWebHdfsClient``
>>> from pywebhdfs.webhdfs import PyWebHdfsClient # doctest: +SKIP
>>> hdfs = PyWebHdfsClient(host='hostname', user_name='username') # doctest: +SKIP
>>> b = from_hdfs('home/username/data/', hdfs=hdfs) # doctest: +SKIP
"""
from .. import hdfs_utils
filenames = hdfs_utils.filenames(hdfs, path)
if not filenames:
raise ValueError("No files found for path %s" % path)
name = next(names)
dsk = dict()
for i, fn in enumerate(filenames):
ext = fn.split('.')[-1]
if ext in ('gz', 'bz2'):
dsk[(name, i)] = (stream_decompress, ext, (hdfs.read_file, fn))
else:
dsk[(name, i)] = (hdfs.read_file, fn)
return Bag(dsk, name, len(dsk))
def stream_decompress(fmt, data):
""" Decompress a block of compressed bytes into a stream of strings """
if fmt == 'gz':
return gzip.GzipFile(fileobj=BytesIO(data))
if fmt == 'bz2':
return bz2_stream(data)
else:
return map(bytes.decode, BytesIO(data))
def bz2_stream(compressed, chunksize=100000):
""" Stream lines from a chunk of compressed bz2 data """
decompressor = bz2.BZ2Decompressor()
for i in range(0, len(compressed), chunksize):
chunk = compressed[i: i+chunksize]
decompressed = decompressor.decompress(chunk).decode()
for line in decompressed.split('\n'):
yield line
def from_sequence(seq, partition_size=None, npartitions=None):
""" Create dask from Python sequence
This sequence should be relatively small in memory. Dask Bag works
best when it handles loading your data itself. Commonly we load a
sequence of filenames into a Bag and then use ``.map`` to open them.
Parameters
----------
seq: Iterable
A sequence of elements to put into the dask
partition_size: int (optional)
The length of each partition
npartitions: int (optional)
The number of desired partitions
It is best to provide either ``partition_size`` or ``npartitions``
(though not both.)
Example
-------
>>> b = from_sequence(['Alice', 'Bob', 'Chuck'], partition_size=2)
See also:
from_filenames: Specialized bag creation function for textfiles
"""
seq = list(seq)
if npartitions and not partition_size:
partition_size = int(math.ceil(len(seq) / npartitions))
if npartitions is None and partition_size is None:
if len(seq) < 100:
partition_size = 1
else:
partition_size = int(len(seq) / 100)
parts = list(partition_all(partition_size, seq))
name = next(load_names)
d = dict(((name, i), part) for i, part in enumerate(parts))
return Bag(d, name, len(d))
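# Illustrative sketch (a hypothetical helper, not part of dask proper):
# ``partition_size`` controls how many elements land in each partition, so
# ten elements with a partition size of four give partitions of 4, 4 and 2.
# When ``npartitions`` is passed instead, the partition size is derived from
# it by ceiling division as in the function above.
def _partitioning_example():
    b = from_sequence(range(10), partition_size=4)
    return b.npartitions  # 3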
def from_url(urls):
"""Create a dask.bag from a url
>>> a = from_url('http://raw.githubusercontent.com/ContinuumIO/dask/master/README.rst')
>>> a.npartitions
1
>>> a.take(8)  # doctest: +SKIP
('Dask\n',
'====\n',
'\n',
'|Build Status| |Coverage| |Doc Status| |Gitter|\n',
'\n',
'Dask provides multi-core execution on larger-than-memory datasets using blocked\n',
'algorithms and task scheduling. It maps high-level NumPy and list operations\n',
'on large datasets on to graphs of many operations on small in-memory datasets.\n')
>>> b = from_url(['http://github.com', 'http://google.com'])
>>> b.npartitions
2
"""
if isinstance(urls, str):
urls = [urls]
name = next(load_names)
dsk = {}
for i, u in enumerate(urls):
dsk[(name, i)] = (list, (urlopen, u))
return Bag(dsk, name, len(urls))
def dictitems(d):
""" A pickleable version of dict.items
>>> dictitems({'x': 1})
[('x', 1)]
"""
return list(d.items())
def takes_multiple_arguments(func):
"""
>>> def f(x, y): pass
>>> takes_multiple_arguments(f)
True
>>> def f(x): pass
>>> takes_multiple_arguments(f)
False
>>> def f(x, y=None): pass
>>> takes_multiple_arguments(f)
False
>>> def f(*args): pass
>>> takes_multiple_arguments(f)
True
>>> takes_multiple_arguments(map) # default to False
False
"""
try:
spec = inspect.getargspec(func)
except:
return False
if spec.varargs:
return True
if spec.defaults is None:
return len(spec.args) != 1
return len(spec.args) - len(spec.defaults) > 1
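# Illustrative sketch (a hypothetical helper, not part of dask proper):
# ``Bag.map`` consults ``takes_multiple_arguments`` to decide whether each
# element should be splatted into the mapped function, which is what lets a
# two-argument function be mapped over a bag of pairs.
def _splat_example():
    def add(x, y):
        return x + y
    splat = (lambda args: add(*args)) if takes_multiple_arguments(add) else add
    return list(map(splat, [(1, 2), (3, 4)]))  # [3, 7]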
def concat(bags):
""" Concatenate many bags together, unioning all elements
>>> import dask.bag as db
>>> a = db.from_sequence([1, 2, 3])
>>> b = db.from_sequence([4, 5, 6])
>>> c = db.concat([a, b])
>>> list(c)
[1, 2, 3, 4, 5, 6]
"""
name = next(names)
counter = itertools.count(0)
dsk = dict(((name, next(counter)), key) for bag in bags
for key in sorted(bag.dask))
return Bag(merge(dsk, *[b.dask for b in bags]), name, len(dsk))
class StringAccessor(object):
""" String processing functions
Examples
--------
>>> import dask.bag as db
>>> b = db.from_sequence(['Alice Smith', 'Bob Jones', 'Charlie Smith'])
>>> list(b.str.lower())
['alice smith', 'bob jones', 'charlie smith']
>>> list(b.str.match('*Smith'))
['Alice Smith', 'Charlie Smith']
>>> list(b.str.split(' '))
[['Alice', 'Smith'], ['Bob', 'Jones'], ['Charlie', 'Smith']]
"""
def __init__(self, bag):
self._bag = bag
def __dir__(self):
return sorted(set(dir(type(self)) + dir(str)))
def _strmap(self, key, *args, **kwargs):
return self._bag.map(lambda s: getattr(s, key)(*args, **kwargs))
def __getattr__(self, key):
try:
return object.__getattribute__(self, key)
except AttributeError:
if key in dir(str):
func = getattr(str, key)
return robust_wraps(func)(partial(self._strmap, key))
else:
raise
def match(self, pattern):
""" Filter strings by those that match a pattern
Example
-------
>>> import dask.bag as db
>>> b = db.from_sequence(['Alice Smith', 'Bob Jones', 'Charlie Smith'])
>>> list(b.str.match('*Smith'))
['Alice Smith', 'Charlie Smith']
See Also
--------
fnmatch.fnmatch
"""
from fnmatch import fnmatch
return self._bag.filter(partial(fnmatch, pat=pattern))
def robust_wraps(wrapper):
""" A weak version of wraps that only copies doc """
def _(wrapped):
wrapped.__doc__ = wrapper.__doc__
return wrapped
return _
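# Illustrative sketch (a hypothetical helper, not part of dask proper):
# ``robust_wraps`` copies the docstring of the given callable onto the
# decorated one and nothing else, so it also works for wrappers that lack a
# ``__name__``.
def _robust_wraps_example():
    def shout(s):
        return s.upper()
    wrapper = robust_wraps(str.upper)(shout)
    return wrapper.__doc__ == str.upper.__doc__  # True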
def reify(seq):
if isinstance(seq, Iterator):
seq = list(seq)
if seq and isinstance(seq[0], Iterator):
seq = list(map(list, seq))
return seq
| {
"repo_name": "marianotepper/dask",
"path": "dask/bag/core.py",
"copies": "1",
"size": "30985",
"license": "bsd-3-clause",
"hash": 6790862722747664000,
"line_mean": 29.6175889328,
"line_max": 100,
"alpha_frac": 0.5421332903,
"autogenerated": false,
"ratio": 3.6720786916330885,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9709097960570943,
"avg_score": 0.0010228042724291055,
"num_lines": 1012
} |
from __future__ import absolute_import, division, print_function
import itertools
import math
import tempfile
import inspect
import gzip
import zlib
import bz2
import os
import codecs
from sys import getdefaultencoding
from fnmatch import fnmatchcase
from glob import glob
from collections import Iterable, Iterator, defaultdict
from functools import wraps, partial
from ..utils import ignoring
from toolz import (merge, frequencies, merge_with, take, reduce,
join, reduceby, valmap, count, map, partition_all, filter,
remove, pluck, groupby, topk)
import toolz
with ignoring(ImportError):
from cytoolz import (frequencies, merge_with, join, reduceby,
count, pluck, groupby, topk)
from ..base import Base, normalize_token
from ..compatibility import (apply, BytesIO, unicode, urlopen, urlparse,
StringIO)
from ..core import list2, quote, istask, get_dependencies, reverse_dict
from ..multiprocessing import get as mpget
from ..optimize import fuse, cull, inline
from ..utils import (tmpfile, file_size, textblock,
takes_multiple_arguments)
names = ('bag-%d' % i for i in itertools.count(1))
tokens = ('-%d' % i for i in itertools.count(1))
load_names = ('load-%d' % i for i in itertools.count(1))
system_encoding = getdefaultencoding()
if system_encoding == 'ascii':
system_encoding = 'utf-8'
no_default = '__no__default__'
def lazify_task(task, start=True):
"""
Given a task, remove unnecessary calls to ``list``
Examples
--------
>>> task = (sum, (list, (map, inc, [1, 2, 3]))) # doctest: +SKIP
>>> lazify_task(task) # doctest: +SKIP
(sum, (map, inc, [1, 2, 3]))
"""
if not istask(task):
return task
head, tail = task[0], task[1:]
if not start and head in (list, reify):
task = task[1]
return lazify_task(*tail, start=False)
else:
return (head,) + tuple([lazify_task(arg, False) for arg in tail])
def lazify(dsk):
"""
Remove unnecessary calls to ``list`` in tasks
See Also
--------
``dask.bag.core.lazify_task``
"""
return valmap(lazify_task, dsk)
def inline_singleton_lists(dsk):
""" Inline lists that are only used once
>>> d = {'b': (list, 'a'),
... 'c': (f, 'b', 1)} # doctest: +SKIP
>>> inline_singleton_lists(d) # doctest: +SKIP
{'c': (f, (list, 'a'), 1)}
Pairs nicely with lazify afterwards
"""
dependencies = dict((k, get_dependencies(dsk, k)) for k in dsk)
dependents = reverse_dict(dependencies)
keys = [k for k, v in dsk.items() if istask(v) and v
and v[0] is list
and len(dependents[k]) == 1]
return inline(dsk, keys, inline_constants=False)
def optimize(dsk, keys, **kwargs):
""" Optimize a dask from a dask.bag """
dsk2 = cull(dsk, keys)
dsk3 = fuse(dsk2)
dsk4 = inline_singleton_lists(dsk3)
dsk5 = lazify(dsk4)
return dsk5
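# Illustrative sketch (a hypothetical helper and toy graph, not part of dask
# proper): the optimization pass culls tasks that the requested keys do not
# need, fuses linear chains, and strips redundant ``list`` wrappers, so only
# the work feeding ``('y', 0)`` survives in the returned graph.
def _optimize_example():
    dsk = {('x', 0): (list, (map, str, [1, 2, 3])),
           ('y', 0): (sum, (map, len, ('x', 0))),
           ('unused', 0): (list, (range, 10))}
    return optimize(dsk, [('y', 0)])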
def to_textfiles(b, path, name_function=str, encoding=system_encoding):
""" Write bag to disk, one filename per partition, one line per element
**Paths**: This will create one file for each partition in your bag. You
can specify the filenames in a variety of ways.
Use a globstring
>>> b.to_textfiles('/path/to/data/*.json.gz') # doctest: +SKIP
The * will be replaced by the increasing sequence 1, 2, ...
::
/path/to/data/0.json.gz
/path/to/data/1.json.gz
Use a globstring and a ``name_function=`` keyword argument. The
name_function function should expect an integer and produce a string.
>>> from datetime import date, timedelta
>>> def name(i):
... return str(date(2015, 1, 1) + i * timedelta(days=1))
>>> name(0)
'2015-01-01'
>>> name(15)
'2015-01-16'
>>> b.to_textfiles('/path/to/data/*.json.gz', name_function=name) # doctest: +SKIP
::
/path/to/data/2015-01-01.json.gz
/path/to/data/2015-01-02.json.gz
...
You can also provide an explicit list of paths.
>>> paths = ['/path/to/data/alice.json.gz', '/path/to/data/bob.json.gz', ...] # doctest: +SKIP
>>> b.to_textfiles(paths) # doctest: +SKIP
**Compression**: Filenames with extensions corresponding to known
compression algorithms (gz, bz2) will be compressed accordingly.
"""
if isinstance(path, (str, unicode)):
if '*' in path:
paths = [path.replace('*', name_function(i))
for i in range(b.npartitions)]
else:
paths = [os.path.join(path, '%s.part' % name_function(i))
for i in range(b.npartitions)]
elif isinstance(path, (tuple, list, set)):
assert len(path) == b.npartitions
paths = path
else:
raise ValueError("Path should be either\n"
"1. A list of paths -- ['foo.json', 'bar.json', ...]\n"
"2. A directory -- 'foo/\n"
"3. A path with a * in it -- 'foo.*.json'")
name = next(names)
dsk = dict(((name, i), (write, (b.name, i), path, encoding))
for i, path in enumerate(paths))
return Bag(merge(b.dask, dsk), name, b.npartitions)
def finalize(bag, results):
if isinstance(bag, Item):
return results[0]
if isinstance(results, Iterator):
results = list(results)
if isinstance(results[0], Iterable) and not isinstance(results[0], str):
results = toolz.concat(results)
if isinstance(results, Iterator):
results = list(results)
return results
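# Illustrative sketch (a hypothetical helper, not part of dask proper):
# ``finalize`` turns per-partition results into the single value a user
# sees: partition lists of a ``Bag`` are concatenated, while an ``Item``
# simply unwraps its only result.
def _finalize_example():
    b = Bag({('x', 0): (range, 3), ('x', 1): (range, 2)}, 'x', 2)
    return finalize(b, [[0, 1, 2], [0, 1]])  # [0, 1, 2, 0, 1]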
class Item(Base):
_optimize = staticmethod(optimize)
_default_get = staticmethod(mpget)
_finalize = staticmethod(finalize)
def __init__(self, dsk, key):
self.dask = dsk
self.key = key
def _keys(self):
return [self.key]
def apply(self, func):
name = next(names)
dsk = {name: (func, self.key)}
return Item(merge(self.dask, dsk), name)
__int__ = __float__ = __complex__ = __bool__ = Base.compute
class Bag(Base):
""" Parallel collection of Python objects
Examples
--------
Create Bag from sequence
>>> import dask.bag as db
>>> b = db.from_sequence(range(5))
>>> list(b.filter(lambda x: x % 2 == 0).map(lambda x: x * 10)) # doctest: +SKIP
[0, 20, 40]
Create Bag from filename or globstring of filenames
>>> b = db.from_filenames('/path/to/mydata.*.json.gz').map(json.loads) # doctest: +SKIP
Create manually (expert use)
>>> dsk = {('x', 0): (range, 5),
... ('x', 1): (range, 5),
... ('x', 2): (range, 5)}
>>> b = Bag(dsk, 'x', npartitions=3)
>>> sorted(b.map(lambda x: x * 10)) # doctest: +SKIP
[0, 0, 0, 10, 10, 10, 20, 20, 20, 30, 30, 30, 40, 40, 40]
>>> int(b.fold(lambda x, y: x + y)) # doctest: +SKIP
30
"""
_optimize = staticmethod(optimize)
_default_get = staticmethod(mpget)
_finalize = staticmethod(finalize)
def __init__(self, dsk, name, npartitions):
self.dask = dsk
self.name = name
self.npartitions = npartitions
self.str = StringAccessor(self)
def map(self, func):
""" Map a function across all elements in collection
>>> import dask.bag as db
>>> b = db.from_sequence(range(5))
>>> list(b.map(lambda x: x * 10)) # doctest: +SKIP
[0, 10, 20, 30, 40]
"""
name = next(names)
if takes_multiple_arguments(func):
func = partial(apply, func)
dsk = dict(((name, i), (reify, (map, func, (self.name, i))))
for i in range(self.npartitions))
return type(self)(merge(self.dask, dsk), name, self.npartitions)
@property
def _args(self):
return (self.dask, self.name, self.npartitions)
def filter(self, predicate):
""" Filter elements in collection by a predicate function
>>> def iseven(x):
... return x % 2 == 0
>>> import dask.bag as db
>>> b = db.from_sequence(range(5))
>>> list(b.filter(iseven)) # doctest: +SKIP
[0, 2, 4]
"""
name = next(names)
dsk = dict(((name, i), (reify, (filter, predicate, (self.name, i))))
for i in range(self.npartitions))
return type(self)(merge(self.dask, dsk), name, self.npartitions)
def remove(self, predicate):
""" Remove elements in collection that match predicate
>>> def iseven(x):
... return x % 2 == 0
>>> import dask.bag as db
>>> b = db.from_sequence(range(5))
>>> list(b.remove(iseven)) # doctest: +SKIP
[1, 3]
"""
name = next(names)
dsk = dict(((name, i), (reify, (remove, predicate, (self.name, i))))
for i in range(self.npartitions))
return type(self)(merge(self.dask, dsk), name, self.npartitions)
def map_partitions(self, func):
""" Apply function to every partition within collection
Note that this requires you to understand how dask.bag partitions your
data and so is somewhat internal.
>>> b.map_partitions(myfunc) # doctest: +SKIP
"""
name = next(names)
dsk = dict(((name, i), (func, (self.name, i)))
for i in range(self.npartitions))
return type(self)(merge(self.dask, dsk), name, self.npartitions)
def pluck(self, key, default=no_default):
""" Select item from all tuples/dicts in collection
>>> b = from_sequence([{'name': 'Alice', 'credits': [1, 2, 3]},
... {'name': 'Bob', 'credits': [10, 20]}])
>>> list(b.pluck('name')) # doctest: +SKIP
['Alice', 'Bob']
>>> list(b.pluck('credits').pluck(0)) # doctest: +SKIP
[1, 10]
"""
name = next(names)
key = quote(key)
if default == no_default:
dsk = dict(((name, i), (list, (pluck, key, (self.name, i))))
for i in range(self.npartitions))
else:
dsk = dict(((name, i), (list, (pluck, key, (self.name, i), default)))
for i in range(self.npartitions))
return type(self)(merge(self.dask, dsk), name, self.npartitions)
@classmethod
def from_sequence(cls, *args, **kwargs):
raise AttributeError("db.Bag.from_sequence is deprecated.\n"
"Use db.from_sequence instead.")
@classmethod
def from_filenames(cls, *args, **kwargs):
raise AttributeError("db.Bag.from_filenames is deprecated.\n"
"Use db.from_filenames instead.")
@wraps(to_textfiles)
def to_textfiles(self, path, name_function=str, encoding=system_encoding):
return to_textfiles(self, path, name_function, encoding)
def fold(self, binop, combine=None, initial=no_default):
""" Parallelizable reduction
Fold is like the builtin function ``reduce`` except that it works in
parallel. Fold takes two binary operator functions, one to reduce each
partition of our dataset and another to combine results between
partitions
1. ``binop``: Binary operator to reduce within each partition
2. ``combine``: Binary operator to combine results from binop
Sequentially this would look like the following:
>>> intermediates = [reduce(binop, part) for part in partitions] # doctest: +SKIP
>>> final = reduce(combine, intermediates) # doctest: +SKIP
If only one function is given then it is used for both functions
``binop`` and ``combine`` as in the following example to compute the
sum:
>>> def add(x, y):
... return x + y
>>> b = from_sequence(range(5))
>>> b.fold(add).compute() # doctest: +SKIP
10
In full form we provide both binary operators as well as their default
arguments
>>> b.fold(binop=add, combine=add, initial=0).compute() # doctest: +SKIP
10
More complex binary operators are also doable
>>> def add_to_set(acc, x):
... ''' Add new element x to set acc '''
... return acc | set([x])
>>> b.fold(add_to_set, set.union, initial=set()).compute() # doctest: +SKIP
{1, 2, 3, 4, 5}
See Also
--------
Bag.foldby
"""
a = next(names)
b = next(names)
initial = quote(initial)
if initial is not no_default:
dsk = dict(((a, i), (reduce, binop, (self.name, i), initial))
for i in range(self.npartitions))
else:
dsk = dict(((a, i), (reduce, binop, (self.name, i)))
for i in range(self.npartitions))
dsk2 = {b: (reduce, combine or binop, list(dsk.keys()))}
return Item(merge(self.dask, dsk, dsk2), b)
def frequencies(self):
""" Count number of occurrences of each distinct element
>>> b = from_sequence(['Alice', 'Bob', 'Alice'])
>>> dict(b.frequencies()) # doctest: +SKIP
{'Alice': 2, 'Bob': 1}
"""
a = next(names)
b = next(names)
dsk = dict(((a, i), (frequencies, (self.name, i)))
for i in range(self.npartitions))
dsk2 = {(b, 0): (dictitems,
(merge_with, sum, list(sorted(dsk.keys()))))}
return type(self)(merge(self.dask, dsk, dsk2), b, 1)
def topk(self, k, key=None):
""" K largest elements in collection
Optionally ordered by some key function
>>> b = from_sequence([10, 3, 5, 7, 11, 4])
>>> list(b.topk(2)) # doctest: +SKIP
[11, 10]
>>> list(b.topk(2, lambda x: -x)) # doctest: +SKIP
[3, 4]
"""
a = next(names)
b = next(names)
if key:
if callable(key) and takes_multiple_arguments(key):
key = partial(apply, key)
func = partial(topk, key=key)
else:
func = topk
dsk = dict(((a, i), (list, (func, k, (self.name, i))))
for i in range(self.npartitions))
dsk2 = {(b, 0): (list, (func, k, (toolz.concat, list(dsk.keys()))))}
return type(self)(merge(self.dask, dsk, dsk2), b, 1)
def distinct(self):
""" Distinct elements of collection
Unordered without repeats.
>>> b = from_sequence(['Alice', 'Bob', 'Alice'])
>>> sorted(b.distinct())
['Alice', 'Bob']
"""
a = next(names)
dsk = dict(((a, i), (set, key)) for i, key in enumerate(self._keys()))
b = next(names)
dsk2 = {(b, 0): (apply, set.union, quote(list(dsk.keys())))}
return type(self)(merge(self.dask, dsk, dsk2), b, 1)
def reduction(self, perpartition, aggregate):
""" Reduce collection with reduction operators
Parameters
----------
perpartition: function
reduction to apply to each partition
aggregate: function
reduction to apply to the results of all partitions
Examples
--------
>>> b = from_sequence(range(10))
>>> b.reduction(sum, sum).compute()
45
"""
a = next(names)
b = next(names)
dsk = dict(((a, i), (perpartition, (self.name, i)))
for i in range(self.npartitions))
dsk2 = {b: (aggregate, list(dsk.keys()))}
return Item(merge(self.dask, dsk, dsk2), b)
@wraps(sum)
def sum(self):
return self.reduction(sum, sum)
@wraps(max)
def max(self):
return self.reduction(max, max)
@wraps(min)
def min(self):
return self.reduction(min, min)
@wraps(any)
def any(self):
return self.reduction(any, any)
@wraps(all)
def all(self):
return self.reduction(all, all)
def count(self):
""" Count the number of elements """
return self.reduction(count, sum)
def mean(self):
""" Arithmetic mean """
def chunk(seq):
total, n = 0.0, 0
for x in seq:
total += x
n += 1
return total, n
def agg(x):
totals, counts = list(zip(*x))
return 1.0 * sum(totals) / sum(counts)
return self.reduction(chunk, agg)
def var(self, ddof=0):
""" Variance """
def chunk(seq):
squares, total, n = 0.0, 0.0, 0
for x in seq:
squares += x**2
total += x
n += 1
return squares, total, n
def agg(x):
squares, totals, counts = list(zip(*x))
x2, x, n = float(sum(squares)), float(sum(totals)), sum(counts)
result = (x2 / n) - (x / n)**2
return result * n / (n - ddof)
return self.reduction(chunk, agg)
def std(self, ddof=0):
""" Standard deviation """
return self.var(ddof=ddof).apply(math.sqrt)
def join(self, other, on_self, on_other=None):
""" Join collection with another collection
Other collection must be an Iterable, and not a Bag.
>>> people = from_sequence(['Alice', 'Bob', 'Charlie'])
>>> fruit = ['Apple', 'Apricot', 'Banana']
>>> list(people.join(fruit, lambda x: x[0])) # doctest: +SKIP
[('Apple', 'Alice'), ('Apricot', 'Alice'), ('Banana', 'Bob')]
"""
assert isinstance(other, Iterable)
assert not isinstance(other, Bag)
if on_other is None:
on_other = on_self
name = next(names)
dsk = dict(((name, i), (list, (join, on_other, other,
on_self, (self.name, i))))
for i in range(self.npartitions))
return type(self)(merge(self.dask, dsk), name, self.npartitions)
def product(self, other):
""" Cartesian product between two bags """
assert isinstance(other, Bag)
name = next(names)
n, m = self.npartitions, other.npartitions
dsk = dict(((name, i*m + j),
(list, (itertools.product, (self.name, i),
(other.name, j))))
for i in range(n) for j in range(m))
return type(self)(merge(self.dask, other.dask, dsk), name, n*m)
def foldby(self, key, binop, initial=no_default, combine=None,
combine_initial=no_default):
""" Combined reduction and groupby
Foldby provides a combined groupby and reduce for efficient parallel
split-apply-combine tasks.
The computation
>>> b.foldby(key, binop, init) # doctest: +SKIP
is equivalent to the following:
>>> def reduction(group): # doctest: +SKIP
... return reduce(binop, group, init) # doctest: +SKIP
>>> b.groupby(key).map(lambda kv: (kv[0], reduction(kv[1])))  # doctest: +SKIP
But uses minimal communication and so is *much* faster.
>>> b = from_sequence(range(10))
>>> iseven = lambda x: x % 2 == 0
>>> add = lambda x, y: x + y
>>> dict(b.foldby(iseven, add)) # doctest: +SKIP
{True: 20, False: 25}
**Key Function**
The key function determines how to group the elements in your bag.
In the common case where your bag holds dictionaries then the key
function often gets out one of those elements.
>>> def key(x):
... return x['name']
This case is so common that it is special cased, and if you provide a
key that is not a callable function then dask.bag will turn it into one
automatically. The following are equivalent:
>>> b.foldby(lambda x: x['name'], ...) # doctest: +SKIP
>>> b.foldby('name', ...) # doctest: +SKIP
**Binops**
It can be tricky to construct the right binary operators to perform
analytic queries. The ``foldby`` method accepts two binary operators,
``binop`` and ``combine``. The two inputs and the output of each binary
operator must all have the same type.
Binop takes a running total and a new element and produces a new total:
>>> def binop(total, x):
... return total + x['amount']
Combine takes two totals and combines them:
>>> def combine(total1, total2):
... return total1 + total2
Each of these binary operators may have a default first value for
total, before any other value is seen. For addition binary operators
like above this is often ``0`` or the identity element for your
operation.
>>> b.foldby('name', binop, 0, combine, 0) # doctest: +SKIP
See Also
--------
toolz.reduceby
pyspark.combineByKey
"""
a = next(names)
b = next(names)
if combine is None:
combine = binop
if initial is not no_default:
dsk = dict(((a, i),
(reduceby, key, binop, (self.name, i), initial))
for i in range(self.npartitions))
else:
dsk = dict(((a, i),
(reduceby, key, binop, (self.name, i)))
for i in range(self.npartitions))
combine2 = lambda acc, x: combine(acc, x[1])
if combine_initial is not no_default:
dsk2 = {(b, 0): (dictitems,
(reduceby,
0, combine2,
(toolz.concat, (map, dictitems, list(dsk.keys()))),
combine_initial))}
else:
dsk2 = {(b, 0): (dictitems,
(merge_with,
(partial, reduce, combine),
list(dsk.keys())))}
return type(self)(merge(self.dask, dsk, dsk2), b, 1)
def take(self, k, compute=True):
""" Take the first k elements
Evaluates by default, use ``compute=False`` to avoid computation.
Only takes from the first partition
>>> b = from_sequence(range(10))
>>> b.take(3) # doctest: +SKIP
(0, 1, 2)
"""
name = next(names)
dsk = {(name, 0): (list, (take, k, (self.name, 0)))}
b = Bag(merge(self.dask, dsk), name, 1)
if compute:
return tuple(b.compute())
else:
return b
def _keys(self):
return [(self.name, i) for i in range(self.npartitions)]
def concat(self):
""" Concatenate nested lists into one long list
>>> b = from_sequence([[1], [2, 3]])
>>> list(b)
[[1], [2, 3]]
>>> list(b.concat())
[1, 2, 3]
"""
name = next(names)
dsk = dict(((name, i), (list, (toolz.concat, (self.name, i))))
for i in range(self.npartitions))
return type(self)(merge(self.dask, dsk), name, self.npartitions)
def __iter__(self):
return iter(self.compute())
def groupby(self, grouper, npartitions=None, blocksize=2**20):
""" Group collection by key function
Note that this requires full dataset read, serialization and shuffle.
This is expensive. If possible you should use ``foldby``.
>>> b = from_sequence(range(10))
>>> dict(b.groupby(lambda x: x % 2 == 0)) # doctest: +SKIP
{True: [0, 2, 4, 6, 8], False: [1, 3, 5, 7, 9]}
See Also
--------
Bag.foldby
"""
if npartitions is None:
npartitions = self.npartitions
import partd
p = ('partd' + next(tokens),)
try:
dsk1 = {p: (partd.Python, (partd.Snappy, partd.File()))}
except AttributeError:
dsk1 = {p: (partd.Python, partd.File())}
# Partition data on disk
name = next(names)
dsk2 = dict(((name, i),
(partition, grouper, (self.name, i),
npartitions, p, blocksize))
for i in range(self.npartitions))
# Barrier
barrier_token = 'barrier' + next(tokens)
def barrier(args): return 0
dsk3 = {barrier_token: (barrier, list(dsk2))}
# Collect groups
name = next(names)
dsk4 = dict(((name, i),
(collect, grouper, i, p, barrier_token))
for i in range(npartitions))
return type(self)(merge(self.dask, dsk1, dsk2, dsk3, dsk4), name, npartitions)
def to_dataframe(self, columns=None):
""" Convert Bag to dask.dataframe
Bag should contain tuple or dict records.
Provide ``columns=`` keyword arg to specify column names.
Index will not be particularly meaningful. Use ``reindex`` afterwards
if necessary.
Examples
--------
>>> import dask.bag as db
>>> b = db.from_sequence([{'name': 'Alice', 'balance': 100},
... {'name': 'Bob', 'balance': 200},
... {'name': 'Charlie', 'balance': 300}],
... npartitions=2)
>>> df = b.to_dataframe()
>>> df.compute()
balance name
0 100 Alice
1 200 Bob
0 300 Charlie
"""
import pandas as pd
import dask.dataframe as dd
if columns is None:
head = self.take(1)[0]
if isinstance(head, dict):
columns = sorted(head)
elif isinstance(head, (tuple, list)):
columns = list(range(len(head)))
name = next(names)
DataFrame = partial(pd.DataFrame, columns=columns)
dsk = dict(((name, i), (DataFrame, (list2, (self.name, i))))
for i in range(self.npartitions))
divisions = [None] * (self.npartitions + 1)
return dd.DataFrame(merge(optimize(self.dask, self._keys()), dsk),
name, columns, divisions)
normalize_token.register(Item, lambda a: a.key)
normalize_token.register(Bag, lambda a: a.name)
def partition(grouper, sequence, npartitions, p, nelements=2**20):
""" Partition a bag along a grouper, store partitions on disk """
for block in partition_all(nelements, sequence):
d = groupby(grouper, block)
d2 = defaultdict(list)
for k, v in d.items():
d2[abs(hash(k)) % npartitions].extend(v)
p.append(d2)
return p
def collect(grouper, group, p, barrier_token):
""" Collect partitions from disk and yield k,v group pairs """
d = groupby(grouper, p.get(group, lock=False))
return list(d.items())
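# Illustrative sketch (a hypothetical helper, not part of dask proper and not
# using partd): the groupby shuffle hashes each group key to choose an output
# partition, so all elements sharing a key land together and can be regrouped
# locally, e.g. ``_shuffle_sketch(lambda x: x % 2, [[0, 1, 2], [3, 4]], 2)``.
def _shuffle_sketch(grouper, blocks, npartitions):
    staged = defaultdict(list)
    for block in blocks:
        for k, v in groupby(grouper, block).items():
            staged[abs(hash(k)) % npartitions].extend(v)
    # Regroup within each output partition, as ``collect`` does after the
    # barrier step.
    return dict((i, groupby(grouper, elems)) for i, elems in staged.items())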
def decode_sequence(encoding, seq):
for item in seq:
yield item.decode(encoding)
opens = {'gz': gzip.open, 'bz2': bz2.BZ2File}
def from_filenames(filenames, chunkbytes=None, encoding=system_encoding):
""" Create dask by loading in lines from many files
Provide list of filenames
>>> b = from_filenames(['myfile.1.txt', 'myfile.2.txt']) # doctest: +SKIP
Or a globstring
>>> b = from_filenames('myfiles.*.txt') # doctest: +SKIP
Parallelize large files by providing the number of uncompressed bytes to
load into each partition.
>>> b = from_filenames('largefile.txt', chunkbytes=1e7) # doctest: +SKIP
See Also
--------
from_sequence: A more generic bag creation function
"""
if isinstance(filenames, str):
filenames = sorted(glob(filenames))
if not filenames:
raise ValueError("No filenames found")
full_filenames = [os.path.abspath(f) for f in filenames]
name = 'from-filename' + next(tokens)
if chunkbytes:
chunkbytes = int(chunkbytes)
taskss = [_chunk_read_file(fn, chunkbytes, encoding) for fn in full_filenames]
d = dict(((name, i), task)
for i, task in enumerate(toolz.concat(taskss)))
else:
extension = os.path.splitext(filenames[0])[1].strip('.')
myopen = opens.get(extension, open)
d = dict(((name, i), (list, (decode_sequence, encoding, (myopen, fn, 'rb'))))
for i, fn in enumerate(full_filenames))
return Bag(d, name, len(d))
def _chunk_read_file(filename, chunkbytes, encoding):
extension = os.path.splitext(filename)[1].strip('.')
compression = {'gz': 'gzip', 'bz2': 'bz2'}.get(extension, None)
return [(list, (StringIO, (bytes.decode,
(textblock, filename, i, i + chunkbytes, compression), encoding)))
for i in range(0, file_size(filename, compression), chunkbytes)]
def write(data, filename, encoding):
dirname = os.path.dirname(filename)
if not os.path.exists(dirname):
with ignoring(OSError):
os.makedirs(dirname)
ext = os.path.splitext(filename)[1][1:]
if ext == 'gz':
f = gzip.open(filename, 'wb')
data = (line.encode(encoding) for line in data)
elif ext == 'bz2':
f = bz2.BZ2File(filename, 'wb')
data = (line.encode(encoding) for line in data)
else:
f = codecs.open(filename, 'wb', encoding=encoding)
try:
for item in data:
f.write(item)
finally:
f.close()
def _get_s3_bucket(bucket_name, aws_access_key, aws_secret_key, connection,
anon):
"""Connect to s3 and return a bucket"""
import boto
if anon is True:
connection = boto.connect_s3(anon=anon)
elif connection is None:
connection = boto.connect_s3(aws_access_key, aws_secret_key)
return connection.get_bucket(bucket_name)
# we need an unmemoized function to call in the main thread. And memoized
# functions for the dask.
_memoized_get_bucket = toolz.memoize(_get_s3_bucket)
def _get_key(bucket_name, conn_args, key_name):
bucket = _memoized_get_bucket(bucket_name, *conn_args)
key = bucket.get_key(key_name)
ext = key_name.split('.')[-1]
return stream_decompress(ext, key.read())
def _parse_s3_URI(bucket_name, paths):
from ..compatibility import quote, unquote
assert bucket_name.startswith('s3://')
o = urlparse('s3://' + quote(bucket_name[len('s3://'):]))
# if path is specified
if (paths == '*') and (o.path != '' and o.path != '/'):
paths = unquote(o.path[1:])
bucket_name = unquote(o.hostname)
return bucket_name, paths
def from_s3(bucket_name, paths='*', aws_access_key=None, aws_secret_key=None,
connection=None, anon=False):
""" Create a Bag by loading textfiles from s3
Each line will be treated as one element and each file in S3 as one
partition.
You may specify a full s3 bucket
>>> b = from_s3('s3://bucket-name') # doctest: +SKIP
Or select files, lists of files, or globstrings of files within that bucket
>>> b = from_s3('s3://bucket-name', 'myfile.json') # doctest: +SKIP
>>> b = from_s3('s3://bucket-name', ['alice.json', 'bob.json']) # doctest: +SKIP
>>> b = from_s3('s3://bucket-name', '*.json') # doctest: +SKIP
"""
conn_args = (aws_access_key, aws_secret_key, connection, anon)
bucket_name, paths = normalize_s3_names(bucket_name, paths, conn_args)
get_key = partial(_get_key, bucket_name, conn_args)
name = next(load_names)
dsk = dict(((name, i), (list, (get_key, k))) for i, k in enumerate(paths))
return Bag(dsk, name, len(paths))
def normalize_s3_names(bucket_name, paths, conn_args):
""" Normalize bucket name and paths """
if bucket_name.startswith('s3://'):
bucket_name, paths = _parse_s3_URI(bucket_name, paths)
if isinstance(paths, str):
if ('*' not in paths) and ('?' not in paths):
return bucket_name, [paths]
else:
bucket = _get_s3_bucket(bucket_name, *conn_args)
keys = bucket.list() # handle globs
matches = [k.name for k in keys if fnmatchcase(k.name, paths)]
return bucket_name, matches
else:
return bucket_name, paths
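# Illustrative sketch (a hypothetical helper, not part of dask proper): an
# ``s3://`` URI is split into a bucket name and a key path purely by URL
# parsing, so no S3 connection is made when the path has no glob characters.
def _s3_name_example():
    return normalize_s3_names('s3://mybucket/data.json', '*', conn_args=None)
    # ('mybucket', ['data.json'])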
def from_hdfs(path, hdfs=None, host='localhost', port='50070', user_name=None):
""" Create dask by loading in files from HDFS
Provide an hdfs directory and credentials
>>> b = from_hdfs('home/username/data/', host='localhost', user_name='ubuntu') # doctest: +SKIP
Alternatively provide an instance of ``pywebhdfs.webhdfs.PyWebHdfsClient``
>>> from pywebhdfs.webhdfs import PyWebHdfsClient # doctest: +SKIP
>>> hdfs = PyWebHdfsClient(host='hostname', user_name='username') # doctest: +SKIP
>>> b = from_hdfs('home/username/data/', hdfs=hdfs) # doctest: +SKIP
"""
from .. import hdfs_utils
filenames = hdfs_utils.filenames(hdfs, path)
if not filenames:
raise ValueError("No files found for path %s" % path)
name = next(names)
dsk = dict()
for i, fn in enumerate(filenames):
ext = fn.split('.')[-1]
if ext in ('gz', 'bz2'):
dsk[(name, i)] = (stream_decompress, ext, (hdfs.read_file, fn))
else:
dsk[(name, i)] = (hdfs.read_file, fn)
return Bag(dsk, name, len(dsk))
def stream_decompress(fmt, data):
""" Decompress a block of compressed bytes into a stream of strings """
if fmt == 'gz':
return gzip.GzipFile(fileobj=BytesIO(data))
if fmt == 'bz2':
return bz2_stream(data)
else:
return map(bytes.decode, BytesIO(data))
def bz2_stream(compressed, chunksize=100000):
""" Stream lines from a chunk of compressed bz2 data """
decompressor = bz2.BZ2Decompressor()
for i in range(0, len(compressed), chunksize):
chunk = compressed[i: i+chunksize]
decompressed = decompressor.decompress(chunk).decode()
for line in decompressed.split('\n'):
yield line + '\n'
def from_sequence(seq, partition_size=None, npartitions=None):
""" Create dask from Python sequence
This sequence should be relatively small in memory. Dask Bag works
best when it handles loading your data itself. Commonly we load a
sequence of filenames into a Bag and then use ``.map`` to open them.
Parameters
----------
seq: Iterable
A sequence of elements to put into the dask
partition_size: int (optional)
The length of each partition
npartitions: int (optional)
The number of desired partitions
It is best to provide either ``partition_size`` or ``npartitions``
(though not both.)
Examples
--------
>>> b = from_sequence(['Alice', 'Bob', 'Chuck'], partition_size=2)
See Also
--------
from_filenames: Specialized bag creation function for textfiles
"""
seq = list(seq)
if npartitions and not partition_size:
partition_size = int(math.ceil(len(seq) / npartitions))
if npartitions is None and partition_size is None:
if len(seq) < 100:
partition_size = 1
else:
partition_size = int(len(seq) / 100)
parts = list(partition_all(partition_size, seq))
name = next(load_names)
d = dict(((name, i), part) for i, part in enumerate(parts))
return Bag(d, name, len(d))
def from_castra(x, columns=None, index=False):
"""Load a dask Bag from a Castra.
Parameters
----------
x : filename or Castra
columns: list or string, optional
The columns to load. Default is all columns.
index: bool, optional
If True, the index is included as the first element in each tuple.
Default is False.
"""
from castra import Castra
if not isinstance(x, Castra):
x = Castra(x, readonly=True)
elif not x._readonly:
x = Castra(x.path, readonly=True)
if columns is None:
columns = x.columns
name = 'from-castra-' + next(tokens)
dsk = dict(((name, i), (load_castra_partition, x, part, columns, index))
for i, part in enumerate(x.partitions))
return Bag(dsk, name, len(x.partitions))
def load_castra_partition(castra, part, columns, index):
import blosc
# Due to serialization issues, blosc needs to be manually initialized in
# each process.
blosc.init()
df = castra.load_partition(part, columns)
if isinstance(columns, list):
items = df.itertuples(index)
else:
items = df.iteritems() if index else iter(df)
items = list(items)
if (items and isinstance(items[0], tuple)
and type(items[0]) is not tuple):
names = items[0]._fields
items = [dict(zip(names, item)) for item in items]
return items
def from_url(urls):
"""Create a dask.bag from a url
>>> a = from_url('http://raw.githubusercontent.com/blaze/dask/master/README.rst') # doctest: +SKIP
>>> a.npartitions # doctest: +SKIP
1
>>> a.take(8)  # doctest: +SKIP
('Dask\n',
'====\n',
'\n',
'|Build Status| |Coverage| |Doc Status| |Gitter|\n',
'\n',
'Dask provides multi-core execution on larger-than-memory datasets using blocked\n',
'algorithms and task scheduling. It maps high-level NumPy and list operations\n',
'on large datasets on to graphs of many operations on small in-memory datasets.\n')
>>> b = from_url(['http://github.com', 'http://google.com']) # doctest: +SKIP
>>> b.npartitions # doctest: +SKIP
2
"""
if isinstance(urls, str):
urls = [urls]
name = next(load_names)
dsk = {}
for i, u in enumerate(urls):
dsk[(name, i)] = (list, (urlopen, u))
return Bag(dsk, name, len(urls))
def dictitems(d):
""" A pickleable version of dict.items
>>> dictitems({'x': 1})
[('x', 1)]
"""
return list(d.items())
def concat(bags):
""" Concatenate many bags together, unioning all elements
>>> import dask.bag as db
>>> a = db.from_sequence([1, 2, 3])
>>> b = db.from_sequence([4, 5, 6])
>>> c = db.concat([a, b])
>>> list(c)
[1, 2, 3, 4, 5, 6]
"""
name = next(names)
counter = itertools.count(0)
dsk = dict(((name, next(counter)), key) for bag in bags
for key in sorted(bag._keys()))
return Bag(merge(dsk, *[b.dask for b in bags]), name, len(dsk))
class StringAccessor(object):
""" String processing functions
Examples
--------
>>> import dask.bag as db
>>> b = db.from_sequence(['Alice Smith', 'Bob Jones', 'Charlie Smith'])
>>> list(b.str.lower())
['alice smith', 'bob jones', 'charlie smith']
>>> list(b.str.match('*Smith'))
['Alice Smith', 'Charlie Smith']
>>> list(b.str.split(' '))
[['Alice', 'Smith'], ['Bob', 'Jones'], ['Charlie', 'Smith']]
"""
def __init__(self, bag):
self._bag = bag
def __dir__(self):
return sorted(set(dir(type(self)) + dir(str)))
def _strmap(self, key, *args, **kwargs):
return self._bag.map(lambda s: getattr(s, key)(*args, **kwargs))
def __getattr__(self, key):
try:
return object.__getattribute__(self, key)
except AttributeError:
if key in dir(str):
func = getattr(str, key)
return robust_wraps(func)(partial(self._strmap, key))
else:
raise
def match(self, pattern):
""" Filter strings by those that match a pattern
Examples
--------
>>> import dask.bag as db
>>> b = db.from_sequence(['Alice Smith', 'Bob Jones', 'Charlie Smith'])
>>> list(b.str.match('*Smith'))
['Alice Smith', 'Charlie Smith']
See Also
--------
fnmatch.fnmatch
"""
from fnmatch import fnmatch
return self._bag.filter(partial(fnmatch, pat=pattern))
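# Illustrative sketch (a hypothetical helper, not part of dask proper):
# attribute access on ``Bag.str`` falls through ``__getattr__`` to the
# matching ``str`` method and returns a lazily mapped bag; nothing is
# computed until the result is iterated.
def _str_accessor_sketch():
    b = from_sequence(['Alice Smith', 'Bob Jones'], partition_size=1)
    lowered = b.str.lower()  # still a Bag with the same number of partitions
    return type(lowered) is Bag and lowered.npartitions == b.npartitions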
def robust_wraps(wrapper):
""" A weak version of wraps that only copies doc """
def _(wrapped):
wrapped.__doc__ = wrapper.__doc__
return wrapped
return _
def reify(seq):
if isinstance(seq, Iterator):
seq = list(seq)
if seq and isinstance(seq[0], Iterator):
seq = list(map(list, seq))
return seq
| {
"repo_name": "vikhyat/dask",
"path": "dask/bag/core.py",
"copies": "1",
"size": "40406",
"license": "bsd-3-clause",
"hash": -7639128239195336000,
"line_mean": 31.2216905901,
"line_max": 103,
"alpha_frac": 0.558803148,
"autogenerated": false,
"ratio": 3.692406104358951,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9746644309971035,
"avg_score": 0.0009129884775830875,
"num_lines": 1254
} |
from __future__ import absolute_import, division, print_function
import itertools
import math
import tempfile
import inspect
import gzip
import zlib
import bz2
import os
from fnmatch import fnmatchcase
from glob import glob
from collections import Iterable, Iterator, defaultdict
from functools import wraps, partial
from dask.utils import takes_multiple_arguments
from toolz import (merge, frequencies, merge_with, take, reduce,
join, reduceby, valmap, count, map, partition_all, filter,
remove, pluck, groupby, topk)
import toolz
from ..utils import tmpfile, ignoring, file_size, textblock
with ignoring(ImportError):
from cytoolz import (frequencies, merge_with, join, reduceby,
count, pluck, groupby, topk)
from ..multiprocessing import get as mpget
from ..core import istask, get_dependencies, reverse_dict
from ..optimize import fuse, cull, inline
from ..compatibility import (apply, BytesIO, unicode, urlopen, urlparse, quote,
unquote, StringIO)
from ..base import Base
names = ('bag-%d' % i for i in itertools.count(1))
tokens = ('-%d' % i for i in itertools.count(1))
load_names = ('load-%d' % i for i in itertools.count(1))
no_default = '__no__default__'
def lazify_task(task, start=True):
"""
Given a task, remove unnecessary calls to ``list``
Example
-------
>>> task = (sum, (list, (map, inc, [1, 2, 3]))) # doctest: +SKIP
>>> lazify_task(task) # doctest: +SKIP
(sum, (map, inc, [1, 2, 3]))
"""
if not istask(task):
return task
head, tail = task[0], task[1:]
if not start and head in (list, reify):
task = task[1]
return lazify_task(*tail, start=False)
else:
return (head,) + tuple([lazify_task(arg, False) for arg in tail])
def lazify(dsk):
"""
Remove unnecessary calls to ``list`` in tasks
See Also:
``dask.bag.core.lazify_task``
"""
return valmap(lazify_task, dsk)
def inline_singleton_lists(dsk):
""" Inline lists that are only used once
>>> d = {'b': (list, 'a'),
... 'c': (f, 'b', 1)} # doctest: +SKIP
>>> inline_singleton_lists(d) # doctest: +SKIP
{'c': (f, (list, 'a'), 1)}
Pairs nicely with lazify afterwards
"""
dependencies = dict((k, get_dependencies(dsk, k)) for k in dsk)
dependents = reverse_dict(dependencies)
keys = [k for k, v in dsk.items() if istask(v) and v
and v[0] is list
and len(dependents[k]) == 1]
return inline(dsk, keys, inline_constants=False)
def optimize(dsk, keys):
""" Optimize a dask from a dask.bag """
dsk2 = cull(dsk, keys)
dsk3 = fuse(dsk2)
dsk4 = inline_singleton_lists(dsk3)
dsk5 = lazify(dsk4)
return dsk5
def list2(seq):
""" Another list function that won't be removed by lazify """
return list(seq)
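# Illustrative sketch (a hypothetical helper, not part of dask proper):
# ``lazify_task`` only strips ``list`` and ``reify`` heads, so wrapping a
# task in ``list2`` is how a concrete list is kept through optimization.
def _list2_example():
    stripped = lazify_task((sum, (list, (map, len, ['ab', 'c']))))
    kept = lazify_task((sum, (list2, (map, len, ['ab', 'c']))))
    return stripped, kept
    # ((sum, (map, len, ['ab', 'c'])), (sum, (list2, (map, len, ['ab', 'c']))))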
def to_textfiles(b, path, name_function=str):
""" Write bag to disk, one filename per partition, one line per element
**Paths**: This will create one file for each partition in your bag. You
can specify the filenames in a variety of ways.
Use a globstring
>>> b.to_textfiles('/path/to/data/*.json.gz') # doctest: +SKIP
The * will be replaced by the increasing sequence 1, 2, ...
::
/path/to/data/0.json.gz
/path/to/data/1.json.gz
Use a globstring and a ``name_function=`` keyword argument. The
name_function function should expect an integer and produce a string.
>>> from datetime import date, timedelta
>>> def name(i):
... return str(date(2015, 1, 1) + i * timedelta(days=1))
>>> name(0)
'2015-01-01'
>>> name(15)
'2015-01-16'
>>> b.to_textfiles('/path/to/data/*.json.gz', name_function=name) # doctest: +SKIP
::
/path/to/data/2015-01-01.json.gz
/path/to/data/2015-01-02.json.gz
...
You can also provide an explicit list of paths.
>>> paths = ['/path/to/data/alice.json.gz', '/path/to/data/bob.json.gz', ...] # doctest: +SKIP
>>> b.to_textfiles(paths) # doctest: +SKIP
**Compression**: Filenames with extensions corresponding to known
compression algorithms (gz, bz2) will be compressed accordingly.
"""
if isinstance(path, (str, unicode)):
if '*' in path:
paths = [path.replace('*', name_function(i))
for i in range(b.npartitions)]
else:
paths = [os.path.join(path, '%s.part' % name_function(i))
for i in range(b.npartitions)]
elif isinstance(path, (tuple, list, set)):
assert len(path) == b.npartitions
paths = path
else:
raise ValueError("Path should be either\n"
"1. A list of paths -- ['foo.json', 'bar.json', ...]\n"
"2. A directory -- 'foo/\n"
"3. A path with a * in it -- 'foo.*.json'")
name = next(names)
dsk = dict(((name, i), (write, (b.name, i), path))
for i, path in enumerate(paths))
return Bag(merge(b.dask, dsk), name, b.npartitions)
def finalize(bag, results):
if isinstance(bag, Item):
return results[0]
if isinstance(results, Iterator):
results = list(results)
if isinstance(results[0], Iterable) and not isinstance(results[0], str):
results = toolz.concat(results)
if isinstance(results, Iterator):
results = list(results)
return results
class Item(Base):
_optimize = staticmethod(optimize)
_default_get = staticmethod(mpget)
_finalize = staticmethod(finalize)
def __init__(self, dsk, key):
self.dask = dsk
self.key = key
def _keys(self):
return [self.key]
def apply(self, func):
name = next(names)
dsk = {name: (func, self.key)}
return Item(merge(self.dask, dsk), name)
__int__ = __float__ = __complex__ = __bool__ = Base.compute
class Bag(Base):
""" Parallel collection of Python objects
Example
-------
Create Bag from sequence
>>> import dask.bag as db
>>> b = db.from_sequence(range(5))
>>> list(b.filter(lambda x: x % 2 == 0).map(lambda x: x * 10)) # doctest: +SKIP
[0, 20, 40]
Create Bag from filename or globstring of filenames
>>> b = db.from_filenames('/path/to/mydata.*.json.gz').map(json.loads) # doctest: +SKIP
Create manually (expert use)
>>> dsk = {('x', 0): (range, 5),
... ('x', 1): (range, 5),
... ('x', 2): (range, 5)}
>>> b = Bag(dsk, 'x', npartitions=3)
>>> sorted(b.map(lambda x: x * 10)) # doctest: +SKIP
[0, 0, 0, 10, 10, 10, 20, 20, 20, 30, 30, 30, 40, 40, 40]
>>> int(b.fold(lambda x, y: x + y)) # doctest: +SKIP
30
"""
_optimize = staticmethod(optimize)
_default_get = staticmethod(mpget)
_finalize = staticmethod(finalize)
def __init__(self, dsk, name, npartitions):
self.dask = dsk
self.name = name
self.npartitions = npartitions
self.str = StringAccessor(self)
def map(self, func):
""" Map a function across all elements in collection
>>> import dask.bag as db
>>> b = db.from_sequence(range(5))
>>> list(b.map(lambda x: x * 10)) # doctest: +SKIP
[0, 10, 20, 30, 40]
"""
name = next(names)
if takes_multiple_arguments(func):
func = partial(apply, func)
dsk = dict(((name, i), (reify, (map, func, (self.name, i))))
for i in range(self.npartitions))
return type(self)(merge(self.dask, dsk), name, self.npartitions)
@property
def _args(self):
return (self.dask, self.name, self.npartitions)
def filter(self, predicate):
""" Filter elements in collection by a predicate function
>>> def iseven(x):
... return x % 2 == 0
>>> import dask.bag as db
>>> b = db.from_sequence(range(5))
>>> list(b.filter(iseven)) # doctest: +SKIP
[0, 2, 4]
"""
name = next(names)
dsk = dict(((name, i), (reify, (filter, predicate, (self.name, i))))
for i in range(self.npartitions))
return type(self)(merge(self.dask, dsk), name, self.npartitions)
def remove(self, predicate):
""" Remove elements in collection that match predicate
>>> def iseven(x):
... return x % 2 == 0
>>> import dask.bag as db
>>> b = db.from_sequence(range(5))
>>> list(b.remove(iseven)) # doctest: +SKIP
[1, 3]
"""
name = next(names)
dsk = dict(((name, i), (reify, (remove, predicate, (self.name, i))))
for i in range(self.npartitions))
return type(self)(merge(self.dask, dsk), name, self.npartitions)
def map_partitions(self, func):
""" Apply function to every partition within collection
Note that this requires you to understand how dask.bag partitions your
data and so is somewhat internal.
>>> b.map_partitions(myfunc) # doctest: +SKIP
"""
name = next(names)
dsk = dict(((name, i), (func, (self.name, i)))
for i in range(self.npartitions))
return type(self)(merge(self.dask, dsk), name, self.npartitions)
def pluck(self, key, default=no_default):
""" Select item from all tuples/dicts in collection
>>> b = from_sequence([{'name': 'Alice', 'credits': [1, 2, 3]},
... {'name': 'Bob', 'credits': [10, 20]}])
>>> list(b.pluck('name')) # doctest: +SKIP
['Alice', 'Bob']
>>> list(b.pluck('credits').pluck(0)) # doctest: +SKIP
[1, 10]
"""
name = next(names)
if isinstance(key, list):
key = (list2, key)
if default == no_default:
dsk = dict(((name, i), (list, (pluck, key, (self.name, i))))
for i in range(self.npartitions))
else:
dsk = dict(((name, i), (list, (pluck, key, (self.name, i), default)))
for i in range(self.npartitions))
return type(self)(merge(self.dask, dsk), name, self.npartitions)
@classmethod
def from_sequence(cls, *args, **kwargs):
raise AttributeError("db.Bag.from_sequence is deprecated.\n"
"Use db.from_sequence instead.")
@classmethod
def from_filenames(cls, *args, **kwargs):
raise AttributeError("db.Bag.from_filenames is deprecated.\n"
"Use db.from_filenames instead.")
@wraps(to_textfiles)
def to_textfiles(self, path, name_function=str):
return to_textfiles(self, path, name_function)
def fold(self, binop, combine=None, initial=None):
""" Splittable reduction
Apply binary operator on each partition to perform reduce. Follow by a
second binary operator to combine results
>>> b = from_sequence(range(5))
>>> b.fold(lambda x, y: x + y).compute() # doctest: +SKIP
10
Optionally provide default arguments and special combine binary
operator
>>> b.fold(lambda x, y: x + y, lambda x, y: x + y, 0).compute() # doctest: +SKIP
10
"""
a = next(names)
b = next(names)
if initial:
dsk = dict(((a, i), (reduce, binop, (self.name, i), initial))
for i in range(self.npartitions))
else:
dsk = dict(((a, i), (reduce, binop, (self.name, i)))
for i in range(self.npartitions))
dsk2 = {b: (reduce, combine or binop, list(dsk.keys()))}
return Item(merge(self.dask, dsk, dsk2), b)
def frequencies(self):
""" Count number of occurrences of each distinct element
>>> b = from_sequence(['Alice', 'Bob', 'Alice'])
>>> dict(b.frequencies()) # doctest: +SKIP
{'Alice': 2, 'Bob': 1}
"""
a = next(names)
b = next(names)
dsk = dict(((a, i), (frequencies, (self.name, i)))
for i in range(self.npartitions))
dsk2 = {(b, 0): (dictitems,
(merge_with, sum, list(sorted(dsk.keys()))))}
return type(self)(merge(self.dask, dsk, dsk2), b, 1)
def topk(self, k, key=None):
""" K largest elements in collection
Optionally ordered by some key function
>>> b = from_sequence([10, 3, 5, 7, 11, 4])
>>> list(b.topk(2)) # doctest: +SKIP
[11, 10]
>>> list(b.topk(2, lambda x: -x)) # doctest: +SKIP
[3, 4]
"""
a = next(names)
b = next(names)
if key:
if callable(key) and takes_multiple_arguments(key):
key = partial(apply, key)
func = partial(topk, key=key)
else:
func = topk
dsk = dict(((a, i), (list, (func, k, (self.name, i))))
for i in range(self.npartitions))
dsk2 = {(b, 0): (list, (func, k, (toolz.concat, list(dsk.keys()))))}
return type(self)(merge(self.dask, dsk, dsk2), b, 1)
def distinct(self):
""" Distinct elements of collection
Unordered without repeats.
>>> b = from_sequence(['Alice', 'Bob', 'Alice'])
>>> sorted(b.distinct())
['Alice', 'Bob']
"""
a = next(names)
dsk = dict(((a, i), (set, key)) for i, key in enumerate(self._keys()))
b = next(names)
dsk2 = {(b, 0): (apply, set.union, (list2, list(dsk.keys())))}
return type(self)(merge(self.dask, dsk, dsk2), b, 1)
def reduction(self, perpartition, aggregate):
""" Reduce collection with reduction operators
Parameters
----------
perpartition: function
reduction to apply to each partition
aggregate: function
reduction to apply to the results of all partitions
Example
-------
>>> b = from_sequence(range(10))
>>> b.reduction(sum, sum).compute()
45
"""
a = next(names)
b = next(names)
dsk = dict(((a, i), (perpartition, (self.name, i)))
for i in range(self.npartitions))
dsk2 = {b: (aggregate, list(dsk.keys()))}
return Item(merge(self.dask, dsk, dsk2), b)
@wraps(sum)
def sum(self):
return self.reduction(sum, sum)
@wraps(max)
def max(self):
return self.reduction(max, max)
@wraps(min)
def min(self):
return self.reduction(min, min)
@wraps(any)
def any(self):
return self.reduction(any, any)
@wraps(all)
def all(self):
return self.reduction(all, all)
def count(self):
""" Count the number of elements """
return self.reduction(count, sum)
def mean(self):
""" Arithmetic mean """
def chunk(seq):
total, n = 0.0, 0
for x in seq:
total += x
n += 1
return total, n
def agg(x):
totals, counts = list(zip(*x))
return 1.0 * sum(totals) / sum(counts)
return self.reduction(chunk, agg)
def var(self, ddof=0):
""" Variance """
def chunk(seq):
squares, total, n = 0.0, 0.0, 0
for x in seq:
squares += x**2
total += x
n += 1
return squares, total, n
def agg(x):
squares, totals, counts = list(zip(*x))
x2, x, n = float(sum(squares)), float(sum(totals)), sum(counts)
result = (x2 / n) - (x / n)**2
return result * n / (n - ddof)
return self.reduction(chunk, agg)
def std(self, ddof=0):
""" Standard deviation """
return self.var(ddof=ddof).apply(math.sqrt)
def join(self, other, on_self, on_other=None):
""" Join collection with another collection
Other collection must be an Iterable, and not a Bag.
>>> people = from_sequence(['Alice', 'Bob', 'Charlie'])
>>> fruit = ['Apple', 'Apricot', 'Banana']
>>> list(people.join(fruit, lambda x: x[0])) # doctest: +SKIP
[('Apple', 'Alice'), ('Apricot', 'Alice'), ('Banana', 'Bob')]
"""
assert isinstance(other, Iterable)
assert not isinstance(other, Bag)
if on_other is None:
on_other = on_self
name = next(names)
dsk = dict(((name, i), (list, (join, on_other, other,
on_self, (self.name, i))))
for i in range(self.npartitions))
return type(self)(merge(self.dask, dsk), name, self.npartitions)
def product(self, other):
""" Cartesian product between two bags """
assert isinstance(other, Bag)
name = next(names)
n, m = self.npartitions, other.npartitions
dsk = dict(((name, i*m + j),
(list, (itertools.product, (self.name, i),
(other.name, j))))
for i in range(n) for j in range(m))
return type(self)(merge(self.dask, other.dask, dsk), name, n*m)
def foldby(self, key, binop, initial=no_default, combine=None,
combine_initial=no_default):
""" Combined reduction and groupby
Foldby provides a combined groupby and reduce for efficient parallel
split-apply-combine tasks.
The computation
>>> b.foldby(key, binop, init) # doctest: +SKIP
is equivalent to the following:
>>> def reduction(group): # doctest: +SKIP
... return reduce(binop, group, init) # doctest: +SKIP
>>> b.groupby(key).map(lambda kv: (kv[0], reduction(kv[1])))  # doctest: +SKIP
But uses minimal communication and so is *much* faster.
>>> b = from_sequence(range(10))
>>> iseven = lambda x: x % 2 == 0
>>> add = lambda x, y: x + y
>>> dict(b.foldby(iseven, add)) # doctest: +SKIP
{True: 20, False: 25}
Key Function
------------
The key function determines how to group the elements in your bag.
In the common case where your bag holds dictionaries then the key
function often gets out one of those elements.
>>> def key(x):
... return x['name']
This case is so common that it is special cased, and if you provide a
key that is not a callable function then dask.bag will turn it into one
automatically. The following are equivalent:
>>> b.foldby(lambda x: x['name'], ...) # doctest: +SKIP
>>> b.foldby('name', ...) # doctest: +SKIP
Binops
------
It can be tricky to construct the right binary operators to perform
analytic queries. The ``foldby`` method accepts two binary operators,
``binop`` and ``combine``.
Binop takes a running total and a new element and produces a new total
>>> def binop(total, x):
... return total + x['amount']
Combine takes two totals and combines them
>>> def combine(total1, total2):
... return total1 + total2
Each of these binary operators may have a default first value for
total, before any other value is seen. For addition binary operators
like above this is often ``0`` or the identity element for your
operation.
>>> b.foldby('name', binop, 0, combine, 0) # doctest: +SKIP
See also
--------
toolz.reduceby
pyspark.combineByKey
"""
a = next(names)
b = next(names)
if combine is None:
combine = binop
if initial is not no_default:
dsk = dict(((a, i),
(reduceby, key, binop, (self.name, i), initial))
for i in range(self.npartitions))
else:
dsk = dict(((a, i),
(reduceby, key, binop, (self.name, i)))
for i in range(self.npartitions))
combine2 = lambda acc, x: combine(acc, x[1])
if combine_initial is not no_default:
dsk2 = {(b, 0): (dictitems,
(reduceby,
0, combine2,
(toolz.concat, (map, dictitems, list(dsk.keys()))),
combine_initial))}
else:
dsk2 = {(b, 0): (dictitems,
(merge_with,
(partial, reduce, combine),
list(dsk.keys())))}
return type(self)(merge(self.dask, dsk, dsk2), b, 1)
def take(self, k, compute=True):
""" Take the first k elements
Evaluates by default, use ``compute=False`` to avoid computation.
Only takes from the first partition
>>> b = from_sequence(range(10))
>>> b.take(3) # doctest: +SKIP
(0, 1, 2)
"""
name = next(names)
dsk = {(name, 0): (list, (take, k, (self.name, 0)))}
b = Bag(merge(self.dask, dsk), name, 1)
if compute:
return tuple(b.compute())
else:
return b
def _keys(self):
return [(self.name, i) for i in range(self.npartitions)]
def concat(self):
""" Concatenate nested lists into one long list
>>> b = from_sequence([[1], [2, 3]])
>>> list(b)
[[1], [2, 3]]
>>> list(b.concat())
[1, 2, 3]
"""
name = next(names)
dsk = dict(((name, i), (list, (toolz.concat, (self.name, i))))
for i in range(self.npartitions))
return type(self)(merge(self.dask, dsk), name, self.npartitions)
def __iter__(self):
return iter(self.compute())
def groupby(self, grouper, npartitions=None, blocksize=2**20):
""" Group collection by key function
Note that this requires full dataset read, serialization and shuffle.
This is expensive. If possible you should use ``foldby``.
>>> b = from_sequence(range(10))
>>> dict(b.groupby(lambda x: x % 2 == 0)) # doctest: +SKIP
{True: [0, 2, 4, 6, 8], False: [1, 3, 5, 7, 9]}
See Also
--------
Bag.foldby
"""
if npartitions is None:
npartitions = self.npartitions
import partd
p = ('partd' + next(tokens),)
try:
dsk1 = {p: (partd.Python, (partd.Snappy, partd.File()))}
except AttributeError:
dsk1 = {p: (partd.Python, partd.File())}
# Partition data on disk
name = next(names)
dsk2 = dict(((name, i),
(partition, grouper, (self.name, i),
npartitions, p, blocksize))
for i in range(self.npartitions))
# Barrier
barrier_token = 'barrier' + next(tokens)
def barrier(args): return 0
dsk3 = {barrier_token: (barrier, list(dsk2))}
# Collect groups
name = next(names)
dsk4 = dict(((name, i),
(collect, grouper, i, p, barrier_token))
for i in range(npartitions))
return type(self)(merge(self.dask, dsk1, dsk2, dsk3, dsk4), name, npartitions)
def to_dataframe(self, columns=None):
""" Convert Bag to dask.dataframe
Bag should contain tuple or dict records.
Provide ``columns=`` keyword arg to specify column names.
Index will not be particularly meaningful. Use ``reindex`` afterwards
if necessary.
Example
-------
>>> import dask.bag as db
>>> b = db.from_sequence([{'name': 'Alice', 'balance': 100},
... {'name': 'Bob', 'balance': 200},
... {'name': 'Charlie', 'balance': 300}],
... npartitions=2)
>>> df = b.to_dataframe()
>>> df.compute()
balance name
0 100 Alice
1 200 Bob
0 300 Charlie
"""
import pandas as pd
import dask.dataframe as dd
if columns is None:
head = self.take(1)[0]
if isinstance(head, dict):
columns = sorted(head)
elif isinstance(head, (tuple, list)):
columns = list(range(len(head)))
name = next(names)
DataFrame = partial(pd.DataFrame, columns=columns)
dsk = dict(((name, i), (DataFrame, (list2, (self.name, i))))
for i in range(self.npartitions))
divisions = [None] * (self.npartitions + 1)
return dd.DataFrame(merge(optimize(self.dask, self._keys()), dsk),
name, columns, divisions)
def partition(grouper, sequence, npartitions, p, nelements=2**20):
""" Partition a bag along a grouper, store partitions on disk """
for block in partition_all(nelements, sequence):
d = groupby(grouper, block)
d2 = defaultdict(list)
for k, v in d.items():
d2[abs(hash(k)) % npartitions].extend(v)
p.append(d2)
return p
def collect(grouper, group, p, barrier_token):
""" Collect partitions from disk and yield k,v group pairs """
d = groupby(grouper, p.get(group, lock=False))
return list(d.items())
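# Illustrative sketch (added for clarity; not part of the original module):
# ``partition`` above hashes each group key into one of ``npartitions``
# buckets before appending to the partd store, and ``collect`` later regroups
# a single bucket.  The hypothetical helper below mimics that bucketing with
# plain dicts so the hashing step can be inspected without partd or a graph.
def _demo_hash_partition(grouper, sequence, npartitions):
    from collections import defaultdict as _defaultdict
    from toolz import groupby as _groupby
    buckets = _defaultdict(list)
    for key, values in _groupby(grouper, sequence).items():
        buckets[abs(hash(key)) % npartitions].extend(values)
    return dict(buckets)
# e.g. _demo_hash_partition(lambda x: x % 3, range(10), 2) keeps every element
# whose key hashes to the same bucket together, mirroring the on-disk layout.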
opens = {'gz': gzip.open, 'bz2': bz2.BZ2File}
def from_filenames(filenames, chunkbytes=None):
""" Create dask by loading in lines from many files
Provide list of filenames
>>> b = from_filenames(['myfile.1.txt', 'myfile.2.txt']) # doctest: +SKIP
Or a globstring
>>> b = from_filenames('myfiles.*.txt') # doctest: +SKIP
    Parallelize large files by providing the number of uncompressed bytes to
load into each partition.
>>> b = from_filenames('largefile.txt', chunkbytes=1e7) # doctest: +SKIP
See also:
from_sequence: A more generic bag creation function
"""
if isinstance(filenames, str):
filenames = sorted(glob(filenames))
if not filenames:
raise ValueError("No filenames found")
full_filenames = [os.path.abspath(f) for f in filenames]
name = 'from-filename' + next(tokens)
if chunkbytes:
chunkbytes = int(chunkbytes)
taskss = [_chunk_read_file(fn, chunkbytes) for fn in full_filenames]
d = dict(((name, i), task)
for i, task in enumerate(toolz.concat(taskss)))
else:
extension = os.path.splitext(filenames[0])[1].strip('.')
myopen = opens.get(extension, open)
d = dict(((name, i), (list, (myopen, fn)))
for i, fn in enumerate(full_filenames))
return Bag(d, name, len(d))
def _chunk_read_file(filename, chunkbytes):
extension = os.path.splitext(filename)[1].strip('.')
compression = {'gz': 'gzip', 'bz2': 'bz2'}.get(extension, None)
return [(list, (StringIO, (bytes.decode,
(textblock, filename, i, i + chunkbytes, compression))))
for i in range(0, file_size(filename, compression), chunkbytes)]
def write(data, filename):
dirname = os.path.dirname(filename)
if not os.path.exists(dirname):
with ignoring(OSError):
os.makedirs(dirname)
ext = os.path.splitext(filename)[1][1:]
if ext == 'gz':
f = gzip.open(filename, 'wb')
data = (line.encode() for line in data)
elif ext == 'bz2':
f = bz2.BZ2File(filename, 'wb')
data = (line.encode() for line in data)
else:
f = open(filename, 'w')
try:
for item in data:
f.write(item)
finally:
f.close()
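# Hedged usage sketch (added for illustration; not in the original module):
# ``write`` picks a gzip, bz2 or plain-text writer from the filename
# extension and creates missing parent directories.  The temporary target
# path below is throwaway.
def _demo_write():
    import os
    import tempfile
    target = os.path.join(tempfile.mkdtemp(), 'out', 'part-0.gz')
    write(['alpha\n', 'beta\n'], target)  # gzip-compressed because of '.gz'
    return target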
def _get_s3_bucket(bucket_name, aws_access_key, aws_secret_key, connection,
anon):
"""Connect to s3 and return a bucket"""
import boto
if anon is True:
connection = boto.connect_s3(anon=anon)
elif connection is None:
connection = boto.connect_s3(aws_access_key, aws_secret_key)
return connection.get_bucket(bucket_name)
# we need an unmemoized function to call in the main thread. And memoized
# functions for the dask.
_memoized_get_bucket = toolz.memoize(_get_s3_bucket)
def _get_key(bucket_name, conn_args, key_name):
bucket = _memoized_get_bucket(bucket_name, *conn_args)
key = bucket.get_key(key_name)
ext = key_name.split('.')[-1]
return stream_decompress(ext, key.read())
def _parse_s3_URI(bucket_name, paths):
assert bucket_name.startswith('s3://')
o = urlparse('s3://' + quote(bucket_name[len('s3://'):]))
# if path is specified
if (paths == '*') and (o.path != '' and o.path != '/'):
paths = unquote(o.path[1:])
bucket_name = unquote(o.hostname)
return bucket_name, paths
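# Hedged usage sketch (added for illustration; not in the original module):
# ``_parse_s3_URI`` splits an ``s3://`` URI into a bucket name and key path.
# The value in the comment is what the parsing above returns for this input.
def _demo_parse_s3_uri():
    # -> ('bucket-name', 'data/part-1.json')
    return _parse_s3_URI('s3://bucket-name/data/part-1.json', '*')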
def from_s3(bucket_name, paths='*', aws_access_key=None, aws_secret_key=None,
connection=None, anon=False):
""" Create a Bag by loading textfiles from s3
Each line will be treated as one element and each file in S3 as one
partition.
You may specify a full s3 bucket
>>> b = from_s3('s3://bucket-name') # doctest: +SKIP
Or select files, lists of files, or globstrings of files within that bucket
>>> b = from_s3('s3://bucket-name', 'myfile.json') # doctest: +SKIP
>>> b = from_s3('s3://bucket-name', ['alice.json', 'bob.json']) # doctest: +SKIP
>>> b = from_s3('s3://bucket-name', '*.json') # doctest: +SKIP
"""
conn_args = (aws_access_key, aws_secret_key, connection, anon)
bucket_name, paths = normalize_s3_names(bucket_name, paths, conn_args)
get_key = partial(_get_key, bucket_name, conn_args)
name = next(load_names)
dsk = dict(((name, i), (list, (get_key, k))) for i, k in enumerate(paths))
return Bag(dsk, name, len(paths))
def normalize_s3_names(bucket_name, paths, conn_args):
""" Normalize bucket name and paths """
if bucket_name.startswith('s3://'):
bucket_name, paths = _parse_s3_URI(bucket_name, paths)
if isinstance(paths, str):
if ('*' not in paths) and ('?' not in paths):
return bucket_name, [paths]
else:
bucket = _get_s3_bucket(bucket_name, *conn_args)
keys = bucket.list() # handle globs
matches = [k.name for k in keys if fnmatchcase(k.name, paths)]
return bucket_name, matches
else:
return bucket_name, paths
def from_hdfs(path, hdfs=None, host='localhost', port='50070', user_name=None):
""" Create dask by loading in files from HDFS
Provide an hdfs directory and credentials
>>> b = from_hdfs('home/username/data/', host='localhost', user_name='ubuntu') # doctest: +SKIP
Alternatively provide an instance of ``pywebhdfs.webhdfs.PyWebHdfsClient``
>>> from pywebhdfs.webhdfs import PyWebHdfsClient # doctest: +SKIP
>>> hdfs = PyWebHdfsClient(host='hostname', user_name='username') # doctest: +SKIP
>>> b = from_hdfs('home/username/data/', hdfs=hdfs) # doctest: +SKIP
"""
from .. import hdfs_utils
filenames = hdfs_utils.filenames(hdfs, path)
if not filenames:
raise ValueError("No files found for path %s" % path)
name = next(names)
dsk = dict()
for i, fn in enumerate(filenames):
ext = fn.split('.')[-1]
if ext in ('gz', 'bz2'):
dsk[(name, i)] = (stream_decompress, ext, (hdfs.read_file, fn))
else:
dsk[(name, i)] = (hdfs.read_file, fn)
return Bag(dsk, name, len(dsk))
def stream_decompress(fmt, data):
""" Decompress a block of compressed bytes into a stream of strings """
if fmt == 'gz':
return gzip.GzipFile(fileobj=BytesIO(data))
if fmt == 'bz2':
return bz2_stream(data)
else:
return map(bytes.decode, BytesIO(data))
def bz2_stream(compressed, chunksize=100000):
""" Stream lines from a chunk of compressed bz2 data """
decompressor = bz2.BZ2Decompressor()
for i in range(0, len(compressed), chunksize):
chunk = compressed[i: i+chunksize]
decompressed = decompressor.decompress(chunk).decode()
for line in decompressed.split('\n'):
yield line + '\n'
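# Illustrative sketch (added for clarity; not part of the original module):
# round-trip a tiny payload through ``bz2_stream`` to show that it yields
# newline-terminated strings, one chunked decompression at a time.
def _demo_bz2_stream():
    compressed = bz2.compress(b'alpha\nbeta\n')
    # -> ['alpha\n', 'beta\n', '\n']  (the trailing '\n' comes from the final
    #    empty fragment of the split)
    return list(bz2_stream(compressed))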
def from_sequence(seq, partition_size=None, npartitions=None):
""" Create dask from Python sequence
This sequence should be relatively small in memory. Dask Bag works
best when it handles loading your data itself. Commonly we load a
sequence of filenames into a Bag and then use ``.map`` to open them.
Parameters
----------
seq: Iterable
A sequence of elements to put into the dask
partition_size: int (optional)
The length of each partition
npartitions: int (optional)
The number of desired partitions
It is best to provide either ``partition_size`` or ``npartitions``
(though not both.)
Example
-------
>>> b = from_sequence(['Alice', 'Bob', 'Chuck'], partition_size=2)
See also:
from_filenames: Specialized bag creation function for textfiles
"""
seq = list(seq)
if npartitions and not partition_size:
partition_size = int(math.ceil(len(seq) / npartitions))
if npartitions is None and partition_size is None:
if len(seq) < 100:
partition_size = 1
else:
partition_size = int(len(seq) / 100)
parts = list(partition_all(partition_size, seq))
name = next(load_names)
d = dict(((name, i), part) for i, part in enumerate(parts))
return Bag(d, name, len(d))
def from_url(urls):
"""Create a dask.bag from a url
>>> a = from_url('http://raw.githubusercontent.com/ContinuumIO/dask/master/README.rst') # doctest: +SKIP
>>> a.npartitions # doctest: +SKIP
1
>> a.take(8) # doctest: +SKIP
('Dask\n',
'====\n',
'\n',
'|Build Status| |Coverage| |Doc Status| |Gitter|\n',
'\n',
'Dask provides multi-core execution on larger-than-memory datasets using blocked\n',
'algorithms and task scheduling. It maps high-level NumPy and list operations\n',
'on large datasets on to graphs of many operations on small in-memory datasets.\n')
>>> b = from_url(['http://github.com', 'http://google.com']) # doctest: +SKIP
>>> b.npartitions # doctest: +SKIP
2
"""
if isinstance(urls, str):
urls = [urls]
name = next(load_names)
dsk = {}
for i, u in enumerate(urls):
dsk[(name, i)] = (list, (urlopen, u))
return Bag(dsk, name, len(urls))
def dictitems(d):
""" A pickleable version of dict.items
>>> dictitems({'x': 1})
[('x', 1)]
"""
return list(d.items())
def concat(bags):
""" Concatenate many bags together, unioning all elements
>>> import dask.bag as db
>>> a = db.from_sequence([1, 2, 3])
>>> b = db.from_sequence([4, 5, 6])
>>> c = db.concat([a, b])
>>> list(c)
[1, 2, 3, 4, 5, 6]
"""
name = next(names)
counter = itertools.count(0)
dsk = dict(((name, next(counter)), key) for bag in bags
for key in sorted(bag._keys()))
return Bag(merge(dsk, *[b.dask for b in bags]), name, len(dsk))
class StringAccessor(object):
""" String processing functions
Examples
--------
>>> import dask.bag as db
>>> b = db.from_sequence(['Alice Smith', 'Bob Jones', 'Charlie Smith'])
>>> list(b.str.lower())
['alice smith', 'bob jones', 'charlie smith']
>>> list(b.str.match('*Smith'))
['Alice Smith', 'Charlie Smith']
>>> list(b.str.split(' '))
[['Alice', 'Smith'], ['Bob', 'Jones'], ['Charlie', 'Smith']]
"""
def __init__(self, bag):
self._bag = bag
def __dir__(self):
return sorted(set(dir(type(self)) + dir(str)))
def _strmap(self, key, *args, **kwargs):
return self._bag.map(lambda s: getattr(s, key)(*args, **kwargs))
def __getattr__(self, key):
try:
return object.__getattribute__(self, key)
except AttributeError:
if key in dir(str):
func = getattr(str, key)
return robust_wraps(func)(partial(self._strmap, key))
else:
raise
def match(self, pattern):
""" Filter strings by those that match a pattern
Example
-------
>>> import dask.bag as db
>>> b = db.from_sequence(['Alice Smith', 'Bob Jones', 'Charlie Smith'])
>>> list(b.str.match('*Smith'))
['Alice Smith', 'Charlie Smith']
See Also
--------
fnmatch.fnmatch
"""
from fnmatch import fnmatch
return self._bag.filter(partial(fnmatch, pat=pattern))
def robust_wraps(wrapper):
""" A weak version of wraps that only copies doc """
def _(wrapped):
wrapped.__doc__ = wrapper.__doc__
return wrapped
return _
def reify(seq):
if isinstance(seq, Iterator):
seq = list(seq)
if seq and isinstance(seq[0], Iterator):
seq = list(map(list, seq))
return seq
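# Hedged example (added for illustration; not part of the original module):
# ``reify`` materializes lazy iterators, including one level of nesting, so
# downstream tasks always see concrete lists.
def _demo_reify():
    nested = iter([iter([1, 2]), iter([3])])
    # -> [[1, 2], [3]]
    return reify(nested)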
| {
"repo_name": "jayhetee/dask",
"path": "dask/bag/core.py",
"copies": "1",
"size": "37332",
"license": "bsd-3-clause",
"hash": -7217649217156482000,
"line_mean": 31.0446351931,
"line_max": 109,
"alpha_frac": 0.5524215151,
"autogenerated": false,
"ratio": 3.6736862822279077,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47261077973279075,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import itertools
import numpy as np
from dask_patternsearch.search import RightHandedSimplexStencil
from toolz import take
import gizeh
import moviepy.editor as mpy
# DB16 - DawnBringer's 16 Col Palette v1.0
# http://pixeljoint.com/forum/forum_posts.asp?TID=12795
colors = [
[20, 12, 28],
[68, 36, 52],
[48, 52, 109],
[78, 74, 78],
[133, 76, 48],
[52, 101, 36],
[208, 70, 72],
[117, 113, 97],
[89, 125, 206],
[210, 125, 44],
[133, 149, 161],
[109, 170, 44],
[210, 170, 153],
[109, 194, 202],
[218, 212, 94],
[222, 238, 214],
]
colors = [[x / 255 for x in color] for color in colors]
halving_colors = {
-5: colors[1],
-4: colors[1],
-3: colors[1],
-2: colors[1],
-1: colors[6],
0: colors[8],
1: colors[11],
2: colors[5],
3: colors[0],
4: colors[0],
5: colors[0],
6: colors[0],
7: colors[0],
}
grid_color = colors[15]
def make_frames(frames, width, scale):
incrementer = itertools.count()
stencil = RightHandedSimplexStencil(2, 30)
rotate = np.array([1, -1])
offset = width / 2 + rotate * width / 10
points = list(take(frames, stencil.generate_stencil_points()))
for point in points:
point.point = rotate * point.point * width / 12 + offset
def make_frame(t):
i = next(incrementer)
surface = gizeh.Surface(width=width, height=width, bg_color=(1, 1, 1))
line = gizeh.polyline([[offset[0], 0], [offset[0], width]], stroke=grid_color, stroke_width=2)
line.draw(surface)
line = gizeh.polyline([[0, offset[1]], [width, offset[1]]], stroke=grid_color, stroke_width=2)
line.draw(surface)
x = offset[0] + width/scale
y = offset[1] - width/scale
while x <= width + 1:
line = gizeh.polyline([[x, 0], [x, width]], stroke=grid_color, stroke_width=0.5)
line.draw(surface)
line = gizeh.polyline([[0, y], [width, y]], stroke=grid_color, stroke_width=0.5)
line.draw(surface)
x += width/scale
y -= width/scale
x = offset[0] - width/scale
y = offset[1] + width/scale
while x >= -1:
line = gizeh.polyline([[x, 0], [x, width]], stroke=grid_color, stroke_width=0.5)
line.draw(surface)
line = gizeh.polyline([[0, y], [width, y]], stroke=grid_color, stroke_width=0.5)
line.draw(surface)
x -= width/scale
y += width/scale
circle = gizeh.circle(r=3.25, xy=offset, fill=halving_colors[0])
circle.draw(surface)
if i > 0:
            for j in range(i-1):
                point = points[j]
color = halving_colors[point.halvings]
circle = gizeh.circle(r=max(0.5, 3.25 - 0.75*point.halvings), xy=point.point, fill=color)
circle.draw(surface)
return surface.get_npimage()
return make_frame
def make_gif(frames, fps=8, width=320, scale=11, filename='stencil.gif'):
clip = mpy.VideoClip(make_frame=make_frames(frames, width, scale), duration=frames / fps)
clip.write_gif(filename, fps=fps)
if __name__ == '__main__':
make_gif(120, filename='stencil120-orig.gif')
print('\n"stencil120-orig.gif" written. I highly recommend optimizing it with gifsicle:\n')
print('gifsicle --colors=256 -O2 stencil120-orig.gif -o stencil120.gif\n')
| {
"repo_name": "eriknw/dask-patternsearch",
"path": "tools/stencilgif.py",
"copies": "1",
"size": "3471",
"license": "bsd-3-clause",
"hash": 3038349069889144000,
"line_mean": 30.8440366972,
"line_max": 105,
"alpha_frac": 0.577355229,
"autogenerated": false,
"ratio": 3.0287958115183247,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.41061510405183244,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import itertools
from datashape import DataShape, iscollection
from toolz import curry, concat
from .collections import Shift
from .core import Node
from .expressions import Field, Map, ElemWise, symbol, shape, Coerce
from .arithmetic import maxshape, UnaryOp, BinOp
from .strings import Like, StrCat
from .datetime import DateTime
__all__ = ['broadcast', 'Broadcast', 'scalar_symbols', 'broadcast_collect']
def broadcast(expr, leaves, scalars=None):
scalars = scalars or scalar_symbols(leaves)
assert len(scalars) == len(leaves)
return Broadcast(tuple(leaves),
tuple(scalars),
expr._subs(dict(zip(leaves, scalars))))
class Broadcast(ElemWise):
""" Fuse scalar expressions over collections
Given elementwise operations on collections, e.g.
>>> from blaze import sin
>>> a = symbol('a', '100 * int')
>>> t = symbol('t', '100 * {x: int, y: int}')
>>> expr = sin(a) + t.y**2
It may be best to represent this as a scalar expression mapped over a
collection
>>> sa = symbol('a', 'int')
>>> st = symbol('t', '{x: int, y: int}')
>>> sexpr = sin(sa) + st.y**2
>>> expr = Broadcast((a, t), (sa, st), sexpr)
This provides opportunities for optimized computation.
In practice, expressions are often collected into Broadcast expressions
    automatically. This class is mainly intended for internal use.
"""
_arguments = '_children', '_scalars', '_scalar_expr'
def _dshape(self):
myshape = maxshape(map(shape, self._children))
return DataShape(*(myshape + (self._scalar_expr.schema,)))
@property
def _inputs(self):
return self._children
@property
def _name(self):
return self._scalar_expr._name
@property
def _full_expr(self):
return self._scalar_expr._subs(
dict(zip(self._scalars, self._children))
)
def _traverse(self):
for item in super(Broadcast, self)._traverse():
yield item
# also yield all the items in the traversal of our full expression
# without yielding the full expression itself.
for item in itertools.islice(self._full_expr._traverse(), 1, None):
yield item
def scalar_symbols(exprs):
"""
Gives a sequence of scalar symbols to mirror these expressions
Examples
--------
>>> x = symbol('x', '5 * 3 * int32')
>>> y = symbol('y', '5 * 3 * int32')
>>> xx, yy = scalar_symbols([x, y])
>>> xx._name, xx.dshape
('x', dshape("int32"))
>>> yy._name, yy.dshape
('y', dshape("int32"))
"""
new_names = ('_%d' % i for i in itertools.count(1))
scalars = []
names = set()
for expr in exprs:
if expr._name and expr._name not in names:
name = expr._name
names.add(name)
else:
name = next(new_names)
s = symbol(name, expr.schema)
scalars.append(s)
return scalars
Broadcastable = (Map, Field, DateTime, UnaryOp, BinOp, Coerce, Shift, Like, StrCat)
WantToBroadcast = (Map, DateTime, UnaryOp, BinOp, Coerce, Shift, Like, StrCat)
def broadcast_collect(expr,
broadcastable=Broadcastable,
want_to_broadcast=WantToBroadcast,
no_recurse=None):
""" Collapse expression down using Broadcast - Tabular cases only
Expressions of type Broadcastables are swallowed into Broadcast
operations
>>> t = symbol('t', 'var * {x: int, y: int, z: int, when: datetime}')
>>> expr = (t.x + 2*t.y).distinct()
>>> broadcast_collect(expr)
distinct(Broadcast(_children=(t,), _scalars=(t,), _scalar_expr=t.x + (2 * t.y)))
>>> from blaze import exp
>>> expr = t.x + 2 * exp(-(t.x - 1.3) ** 2)
>>> broadcast_collect(expr)
Broadcast(_children=(t,), _scalars=(t,), _scalar_expr=t.x + (2 * (exp(-((t.x - 1.3) ** 2)))))
"""
if (isinstance(expr, want_to_broadcast) and
iscollection(expr.dshape)):
leaves = leaves_of_type(broadcastable, expr)
expr = broadcast(expr, sorted(leaves, key=str))
if no_recurse is not None and isinstance(expr, no_recurse):
return expr
# Recurse down
children = (
broadcast_collect(i, broadcastable, want_to_broadcast, no_recurse)
for i in expr._inputs
)
return expr._subs({e: c for e, c in zip(expr._inputs, children)})
@curry
def leaves_of_type(types, expr):
""" Leaves of an expression skipping all operations of type ``types``
"""
if not isinstance(expr, types):
return set([expr])
else:
return set.union(*map(leaves_of_type(types), expr._inputs))
| {
"repo_name": "ContinuumIO/blaze",
"path": "blaze/expr/broadcast.py",
"copies": "3",
"size": "4761",
"license": "bsd-3-clause",
"hash": -5249906757919316000,
"line_mean": 28.3888888889,
"line_max": 97,
"alpha_frac": 0.604494854,
"autogenerated": false,
"ratio": 3.6821345707656614,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5786629424765661,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import itertools
from llvm.core import Constant
from .llvm_array import (auto_const_intp,
intp_type,
store_at, load_at, get_shape_ptr, get_data_ptr,
get_strides_ptr, isinteger, isiterable,
STRIDED)
def _check_N(N):
if N is None:
raise ValueError("negative integers not supported")
def adjust_slice(key, N=None):
start = key.start
if start is None:
start = 0
if start < 0:
_check_N(N)
while start < 0:
start += N
stop = key.stop
if stop is None:
_check_N(N)
stop = N
if stop < 0:
_check_N(N)
while stop < 0:
stop += N
step = key.step
if step is None:
step = 1
return start, stop, step
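# Hedged usage sketch (added for illustration; not part of the original
# module): ``adjust_slice`` normalizes a Python ``slice`` into concrete
# (start, stop, step) integers, using the axis length ``N`` to resolve
# negative or missing bounds.
def _demo_adjust_slice():
    assert adjust_slice(slice(None, 5, None)) == (0, 5, 1)
    assert adjust_slice(slice(-3, None, 2), N=10) == (7, 10, 2)
    return True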
# STRIDED
def Sarr_from_S(arr, key):
raise NotImplementedError
def Sarr_from_S_slice(arr, start, stop, step):
raise NotImplementedError
def from_S_int(arr, index):
return from_S_ints(arr, (index,))
def from_S_ints(arr, key):
raise NotImplementedError
builder = arr.builder
num = len(key)
newnd = arr.nd - num
if newnd < 0:
raise ValueError("Too many keys")
new = arr.getview(nd=newnd)
oldshape = get_shape_ptr(builder, arr.array_ptr)
newshape = get_shape_ptr(builder, new.array_ptr)
# Load the shape array
for i in range(newnd):
val = load_at(builder, oldshape, i+num)
store_at(builder, newshape, i, val)
# Load the data-pointer
old_data_ptr = get_data_ptr(builder, arr.array_ptr)
new_data_ptr = get_data_ptr(builder, new.array_ptr)
loc = Constant.int(intp_type, 0)
factor = Constant.int(intp_type, 1)
for index in range(arr.nd-1,-1,-1):
val = load_at(builder, oldshape, index)
factor = builder.mul(factor, val)
if index < num: #
keyval = auto_const_intp(key[index])
# Multiply by strides
tmp = builder.mul(keyval, factor)
# Add to location
loc = builder.add(loc, tmp)
ptr = builder.gep(old_data_ptr, [loc])
builder.store(ptr, new_data_ptr)
return new
def from_S_slice(arr, start, end):
raise NotImplementedError
builder = arr.builder
new = arr.getview()
# Load the shape array
oldshape = get_shape_ptr(builder, arr.array_ptr)
newshape = get_shape_ptr(builder, new.array_ptr)
    diff = Constant.int(intp_type, end-start)
store_at(builder, newshape, 0, diff)
for i in range(1, new.nd):
val = load_at(builder, oldshape, i)
store_at(builder, newshape, i, val)
# Data Pointer
old_data_ptr = get_data_ptr(builder, arr.array_ptr)
loc = Constant.int(intp_type, start)
    for dim in arr.shape[1:]:
loc = builder.mul(loc, dim)
ptr = builder.gep(old_data_ptr, [loc])
new_data_ptr = get_data_ptr(builder, new.array_ptr)
builder.store(ptr, new_data_ptr)
return new
# FORTRAN CONTIGUOUS
def Sarr_from_F(arr, key):
raise NotImplementedError
def Sarr_from_F_slice(arr, start, stop, step):
raise NotImplementedError
def from_F_int(arr, index):
return from_F_ints(arr, (index,))
# key will be *just* the final integers to extract
# so that resulting array stays F_CONTIGUOUS
def from_F_ints(arr, key):
raise NotImplementedError
builder = arr.builder
num = len(key)
newnd = arr.nd - num
if newnd < 0:
raise ValueError("Too many keys")
new = arr.getview(nd=newnd)
oldshape = get_shape_ptr(builder, arr.array_ptr)
newshape = get_shape_ptr(builder, new.array_ptr)
# Load the shape array
for i in range(newnd):
val = load_at(builder, oldshape, i+num)
store_at(builder, newshape, i, val)
# Load the data-pointer
old_data_ptr = get_data_ptr(builder, arr.array_ptr)
new_data_ptr = get_data_ptr(builder, new.array_ptr)
loc = Constant.int(intp_type, 0)
factor = Constant.int(intp_type, 1)
for index in range(arr.nd-1,-1,-1):
val = load_at(builder, oldshape, index)
factor = builder.mul(factor, val)
if index < num: #
keyval = auto_const_intp(key[index])
# Multiply by strides
tmp = builder.mul(keyval, factor)
# Add to location
loc = builder.add(loc, tmp)
ptr = builder.gep(old_data_ptr, [loc])
builder.store(ptr, new_data_ptr)
return new
def from_F_slice(arr, start, end):
raise NotImplementedError
builder = arr.builder
new = arr.getview()
# Load the shape array
oldshape = get_shape_ptr(builder, arr.array_ptr)
newshape = get_shape_ptr(builder, new.array_ptr)
    diff = Constant.int(intp_type, end-start)
store_at(builder, newshape, 0, diff)
for i in range(1, new.nd):
val = load_at(builder, oldshape, i)
store_at(builder, newshape, i, val)
# Data Pointer
old_data_ptr = get_data_ptr(builder, arr.array_ptr)
loc = Constant.int(intp_type, start)
    for dim in arr.shape[1:]:
loc = builder.mul(loc, dim)
ptr = builder.gep(old_data_ptr, [loc])
new_data_ptr = get_data_ptr(builder, new.array_ptr)
builder.store(ptr, new_data_ptr)
return new
# C-CONTIGUOUS
def Sarr_from_C(arr, key):
raise NotImplementedError
def Sarr_from_C_slice(arr, start, stop, step):
builder = arr.builder
new = arr.getview(kind=STRIDED)
oldshape = get_shape_ptr(builder, arr.array_ptr)
newshape = get_shape_ptr(builder, new.array_ptr)
    newstrides = get_strides_ptr(builder, new.array_ptr)
if all(hasattr(x, '__index__') for x in [start, stop, step]):
        newdim = auto_const_intp((stop - start) // step)
        step = auto_const_intp(step)
else:
start, stop, step = [auto_const_intp(x) for x in [start, stop, step]]
tmp = builder.sub(stop, start)
newdim = builder.udiv(tmp, step)
store_at(builder, newshape, 0, newdim)
# Copy other dimensions over
for i in range(1, arr.nd):
val = load_at(builder, oldshape, i)
store_at(builder, newshape, i, val)
raise NotImplementedError
# Fill-in strides
# Update data-ptr
def from_C_int(arr, index):
return from_C_ints(arr, (index,))
def from_C_ints(arr, key):
builder = arr.builder
num = len(key)
newnd = arr.nd - num
if newnd < 0:
raise ValueError("Too many keys")
new = arr.getview(nd=newnd)
oldshape = get_shape_ptr(builder, arr.array_ptr)
newshape = get_shape_ptr(builder, new.array_ptr)
# Load the shape array
for i in range(newnd):
val = load_at(builder, oldshape, i+num)
store_at(builder, newshape, i, val)
# Load the data-pointer
old_data_ptr = get_data_ptr(builder, arr.array_ptr)
new_data_ptr = get_data_ptr(builder, new.array_ptr)
loc = Constant.int(intp_type, 0)
factor = Constant.int(intp_type, 1)
for index in range(arr.nd-1,-1,-1):
val = load_at(builder, oldshape, index)
factor = builder.mul(factor, val)
if index < num: #
keyval = auto_const_intp(key[index])
# Multiply by strides
tmp = builder.mul(keyval, factor)
# Add to location
loc = builder.add(loc, tmp)
ptr = builder.gep(old_data_ptr, [loc])
builder.store(ptr, new_data_ptr)
return new
def from_C_slice(arr, start, end):
builder = arr.builder
new = arr.getview()
# Load the shape array
oldshape = get_shape_ptr(builder, arr.array_ptr)
newshape = get_shape_ptr(builder, new.array_ptr)
    diff = Constant.int(intp_type, end-start)
store_at(builder, newshape, 0, diff)
for i in range(1, new.nd):
val = load_at(builder, oldshape, i)
store_at(builder, newshape, i, val)
# Data Pointer
old_data_ptr = get_data_ptr(builder, arr.array_ptr)
loc = Constant.int(intp_type, start)
    for dim in arr.shape[1:]:
loc = builder.mul(loc, dim)
ptr = builder.gep(old_data_ptr, [loc])
new_data_ptr = get_data_ptr(builder, new.array_ptr)
builder.store(ptr, new_data_ptr)
return new
# get just the integers
def _convert(x):
if hasattr(x, '__index__'):
return x.__index__()
else:
return x
_keymsg = "Unsupported getitem value %s"
# val is either Ellipsis or slice object.
# check to see if start, stop, and/or step is given for slice
def _needstride(val):
if not isinstance(val, slice):
return False
if val.start is not None and val.start != 0:
return True
if val.stop is not None:
return True
if (val.step is not None) and (val.step != 1):
return True
return False
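# Hedged examples (added for illustration; not part of the original module):
# ``_needstride`` reports whether a slice forces a strided, non-contiguous
# view.  A full ``:`` slice stays contiguous; any explicit start, stop or
# non-unit step does not.
def _demo_needstride():
    assert _needstride(slice(None)) is False
    assert _needstride(slice(2, None)) is True
    assert _needstride(slice(None, None, 3)) is True
    return True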
def _getitem_C(arr, key):
lastint = None
needstrided = False
    # determine if 1) the elements of the getitem iterable are
# integers (LLVM or Python indexable), Ellipsis,
# or slice objects
# 2) the integer elements are all at the front
# so that the resulting slice is continuous
for i, val in enumerate(key):
if isinteger(val):
if lastint is not None:
needstrided = True
        elif val is Ellipsis or isinstance(val, slice):
if lastint is None:
lastint = i
needstrided = _needstride(val)
else:
raise ValueError(_keymsg % val)
if not needstrided:
key = [_convert(x) for x in itertools.islice(key, lastint)]
return needstrided, key
def _getitem_F(arr, key):
# This looks for integers at the end of the key iterable
# arr[:,...,i,j] would not need strided
# arr[:,i,:,j] would need strided as would a[:,i,5:20,j]
# and a[:,...,5:10,j]
# elements can be integers or LLVM ints
# with indexing being done either at compile time (Python int)
# or run time (LLVM int)
last_elsl = None
needstrided = False
for i, val in enumerate(key):
if isinteger(val):
if last_elsl is None:
last_elsl = i
        elif val is Ellipsis or isinstance(val, slice):
if last_elsl is not None:
needstrided = True
needstrided = needstrided or _needstride(val)
else:
raise ValueError(_keymsg % val)
# Return just the integers fields if needstrided not set
if not needstrided:
        key = [_convert(x) for x in itertools.islice(key, last_elsl, None)]
return needstrided, key
def _getitem_S(arr, key):
return True, key
def from_Array(arr, key, char):
if isinteger(key):
return eval('from_%s_int' % char)(arr, key)
elif isinstance(key, slice):
if key == slice(None):
return arr
else:
            start, stop, step = adjust_slice(key)
if step == 1:
return eval('from_%s_slice' % char)(arr, start, stop)
else:
return eval('Sarr_from_%s_slice' % char)(arr, start, stop, step)
elif isiterable(key):
# will be less than arr._nd or have '...' or ':'
# at the end
needstrided, key = eval("_getitem_%s" % char)(arr, key)
if needstrided:
return eval('Sarr_from_%s' % char)(arr, key)
if len(key) > arr.nd:
            raise ValueError('Too many indices')
return eval('from_%s_ints' % char)(arr, key)
else:
raise ValueError(_keymsg % key)
| {
"repo_name": "aaronmartin0303/blaze",
"path": "blaze/compute/llgetitem.py",
"copies": "2",
"size": "11489",
"license": "bsd-3-clause",
"hash": -9084881157025577000,
"line_mean": 29.2342105263,
"line_max": 80,
"alpha_frac": 0.598833667,
"autogenerated": false,
"ratio": 3.35739333722969,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.495622700422969,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import itertools
from toolz import curry
from datashape import DataShape, iscollection
from .collections import Shift
from .expressions import Field, Map, ElemWise, symbol, shape, Coerce
from .arithmetic import maxshape, UnaryOp, BinOp
from .datetime import DateTime
__all__ = ['broadcast', 'Broadcast', 'scalar_symbols']
def broadcast(expr, leaves, scalars=None):
scalars = scalars or scalar_symbols(leaves)
assert len(scalars) == len(leaves)
return Broadcast(tuple(leaves),
tuple(scalars),
expr._subs(dict(zip(leaves, scalars))))
class Broadcast(ElemWise):
""" Fuse scalar expressions over collections
Given elementwise operations on collections, e.g.
>>> from blaze import sin
>>> a = symbol('a', '100 * int')
>>> t = symbol('t', '100 * {x: int, y: int}')
>>> expr = sin(a) + t.y**2
It may be best to represent this as a scalar expression mapped over a
collection
>>> sa = symbol('a', 'int')
>>> st = symbol('t', '{x: int, y: int}')
>>> sexpr = sin(sa) + st.y**2
>>> expr = Broadcast((a, t), (sa, st), sexpr)
This provides opportunities for optimized computation.
In practice, expressions are often collected into Broadcast expressions
    automatically. This class is mainly intended for internal use.
"""
__slots__ = '_hash', '_children', '_scalars', '_scalar_expr'
@property
def dshape(self):
myshape = maxshape(map(shape, self._children))
return DataShape(*(myshape + (self._scalar_expr.schema,)))
@property
def _inputs(self):
return self._children
@property
def _name(self):
return self._scalar_expr._name
@property
def _full_expr(self):
return self._scalar_expr._subs(dict(zip(self._scalars,
self._children)))
def scalar_symbols(exprs):
"""
Gives a sequence of scalar symbols to mirror these expressions
Examples
--------
>>> x = symbol('x', '5 * 3 * int32')
>>> y = symbol('y', '5 * 3 * int32')
>>> xx, yy = scalar_symbols([x, y])
>>> xx._name, xx.dshape
('x', dshape("int32"))
>>> yy._name, yy.dshape
('y', dshape("int32"))
"""
new_names = ('_%d' % i for i in itertools.count(1))
scalars = []
names = set()
for expr in exprs:
if expr._name and expr._name not in names:
name = expr._name
names.add(name)
else:
name = next(new_names)
s = symbol(name, expr.schema)
scalars.append(s)
return scalars
Broadcastable = (Map, Field, DateTime, UnaryOp, BinOp, Coerce, Shift)
WantToBroadcast = (Map, DateTime, UnaryOp, BinOp, Coerce, Shift)
def broadcast_collect(expr,
broadcastable=Broadcastable,
want_to_broadcast=WantToBroadcast,
no_recurse=None):
""" Collapse expression down using Broadcast - Tabular cases only
Expressions of type Broadcastables are swallowed into Broadcast
operations
>>> t = symbol('t', 'var * {x: int, y: int, z: int, when: datetime}')
>>> expr = (t.x + 2*t.y).distinct()
>>> broadcast_collect(expr)
distinct(Broadcast(_children=(t,), _scalars=(t,), _scalar_expr=t.x + (2 * t.y)))
>>> from blaze import exp
>>> expr = t.x + 2 * exp(-(t.x - 1.3) ** 2)
>>> broadcast_collect(expr)
Broadcast(_children=(t,), _scalars=(t,), _scalar_expr=t.x + (2 * (exp(-((t.x - 1.3) ** 2)))))
"""
if (isinstance(expr, want_to_broadcast) and
iscollection(expr.dshape)):
leaves = leaves_of_type(broadcastable, expr)
expr = broadcast(expr, sorted(leaves, key=str))
if no_recurse is not None and isinstance(expr, no_recurse):
return expr
# Recurse down
children = (
broadcast_collect(i, broadcastable, want_to_broadcast, no_recurse)
for i in expr._inputs
)
return expr._subs({e: c for e, c in zip(expr._inputs, children)})
@curry
def leaves_of_type(types, expr):
""" Leaves of an expression skipping all operations of type ``types``
"""
if not isinstance(expr, types):
return set([expr])
else:
return set.union(*map(leaves_of_type(types), expr._inputs))
| {
"repo_name": "ChinaQuants/blaze",
"path": "blaze/expr/broadcast.py",
"copies": "1",
"size": "4361",
"license": "bsd-3-clause",
"hash": -1183051264635120400,
"line_mean": 27.5032679739,
"line_max": 97,
"alpha_frac": 0.5941297867,
"autogenerated": false,
"ratio": 3.6432748538011697,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9736686706268654,
"avg_score": 0.0001435868465033072,
"num_lines": 153
} |
from __future__ import absolute_import, division, print_function
import itertools
from toolz import curry
from datashape import DataShape, iscollection
from .expressions import Field, Map, ElemWise, symbol, shape
from .arithmetic import maxshape, Arithmetic, UnaryOp
from .math import Math
from .datetime import DateTime
__all__ = ['broadcast', 'Broadcast', 'scalar_symbols']
def broadcast(expr, leaves, scalars=None):
scalars = scalars or scalar_symbols(leaves)
assert len(scalars) == len(leaves)
return Broadcast(tuple(leaves),
tuple(scalars),
expr._subs(dict(zip(leaves, scalars))))
class Broadcast(ElemWise):
""" Fuse scalar expressions over collections
Given elementwise operations on collections, e.g.
>>> from blaze import sin
>>> a = symbol('a', '100 * int')
>>> t = symbol('t', '100 * {x: int, y: int}')
>>> expr = sin(a) + t.y**2
It may be best to represent this as a scalar expression mapped over a
collection
>>> sa = symbol('a', 'int')
>>> st = symbol('t', '{x: int, y: int}')
>>> sexpr = sin(sa) + st.y**2
>>> expr = Broadcast((a, t), (sa, st), sexpr)
This provides opportunities for optimized computation.
In practice, expressions are often collected into Broadcast expressions
    automatically. This class is mainly intended for internal use.
"""
__slots__ = '_hash', '_children', '_scalars', '_scalar_expr'
@property
def dshape(self):
myshape = maxshape(map(shape, self._children))
return DataShape(*(myshape + (self._scalar_expr.schema,)))
@property
def _inputs(self):
return self._children
@property
def _name(self):
return self._scalar_expr._name
@property
def _full_expr(self):
return self._scalar_expr._subs(dict(zip(self._scalars,
self._children)))
def scalar_symbols(exprs):
"""
Gives a sequence of scalar symbols to mirror these expressions
Examples
--------
>>> x = symbol('x', '5 * 3 * int32')
>>> y = symbol('y', '5 * 3 * int32')
>>> xx, yy = scalar_symbols([x, y])
>>> xx._name, xx.dshape
('x', dshape("int32"))
>>> yy._name, yy.dshape
('y', dshape("int32"))
"""
new_names = ('_%d' % i for i in itertools.count(1))
scalars = []
names = set()
for expr in exprs:
if expr._name and expr._name not in names:
name = expr._name
names.add(name)
else:
name = next(new_names)
s = symbol(name, expr.schema)
scalars.append(s)
return scalars
Broadcastable = (Arithmetic, Math, Map, Field, DateTime, UnaryOp)
WantToBroadcast = (Arithmetic, Math, Map, DateTime, UnaryOp)
def broadcast_collect(expr, Broadcastable=Broadcastable,
WantToBroadcast=WantToBroadcast):
""" Collapse expression down using Broadcast - Tabular cases only
Expressions of type Broadcastables are swallowed into Broadcast
operations
>>> t = symbol('t', 'var * {x: int, y: int, z: int, when: datetime}')
>>> expr = (t.x + 2*t.y).distinct()
>>> broadcast_collect(expr)
distinct(Broadcast(_children=(t,), _scalars=(t,), _scalar_expr=t.x + (2 * t.y)))
>>> from blaze import exp
>>> expr = t.x + 2 * exp(-(t.x - 1.3) ** 2)
>>> broadcast_collect(expr)
Broadcast(_children=(t,), _scalars=(t,), _scalar_expr=t.x + (2 * (exp(-((t.x - 1.3) ** 2)))))
"""
if (isinstance(expr, WantToBroadcast) and
iscollection(expr.dshape)):
leaves = leaves_of_type(Broadcastable, expr)
expr = broadcast(expr, sorted(leaves, key=str))
# Recurse down
children = [broadcast_collect(i, Broadcastable, WantToBroadcast)
for i in expr._inputs]
return expr._subs(dict(zip(expr._inputs, children)))
@curry
def leaves_of_type(types, expr):
""" Leaves of an expression skipping all operations of type ``types``
"""
if not isinstance(expr, types):
return set([expr])
else:
return set.union(*map(leaves_of_type(types), expr._inputs))
| {
"repo_name": "alexmojaki/blaze",
"path": "blaze/expr/broadcast.py",
"copies": "5",
"size": "4159",
"license": "bsd-3-clause",
"hash": -9201309584324724000,
"line_mean": 27.4863013699,
"line_max": 97,
"alpha_frac": 0.599903823,
"autogenerated": false,
"ratio": 3.6259808195292065,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6725884642529206,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import itertools
import numpy as np
_counter = itertools.count()
def parameterized(names, params):
def decorator(func):
func.param_names = names
func.params = params
return func
return decorator
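# Hedged usage sketch (added for illustration; not part of the original
# module): ``parameterized`` only attaches the attributes that airspeed
# velocity (asv) inspects, so one benchmark function runs once per parameter
# value.  The benchmark name below is hypothetical.
@parameterized(['size'], ([10, 100, 1000],))
def _demo_time_sum(size):
    return np.arange(size).sum()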
def requires_dask():
try:
import dask # noqa
except ImportError:
raise NotImplementedError
def randn(shape, frac_nan=None, chunks=None, seed=0):
rng = np.random.RandomState(seed)
if chunks is None:
x = rng.standard_normal(shape)
else:
import dask.array as da
rng = da.random.RandomState(seed)
x = rng.standard_normal(shape, chunks=chunks)
if frac_nan is not None:
inds = rng.choice(range(x.size), int(x.size * frac_nan))
x.flat[inds] = np.nan
return x
def randint(low, high=None, size=None, frac_minus=None, seed=0):
rng = np.random.RandomState(seed)
x = rng.randint(low, high, size)
if frac_minus is not None:
inds = rng.choice(range(x.size), int(x.size * frac_minus))
x.flat[inds] = -1
return x
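# Hedged usage sketch (added for illustration; not part of the original
# module): the helpers above build reproducible benchmark inputs, and the
# ``frac_nan`` / ``frac_minus`` arguments poison a random fraction of the
# elements so benchmarks also exercise missing-data code paths.
def _demo_inputs():
    dense = randn((100, 100), frac_nan=0.1, seed=0)             # ~10% NaNs
    ints = randint(0, 10, size=(100,), frac_minus=0.2, seed=0)  # ~20% set to -1
    return dense, ints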
| {
"repo_name": "shoyer/xray",
"path": "asv_bench/benchmarks/__init__.py",
"copies": "1",
"size": "1128",
"license": "apache-2.0",
"hash": 5933893693757480000,
"line_mean": 22.5,
"line_max": 66,
"alpha_frac": 0.6320921986,
"autogenerated": false,
"ratio": 3.4922600619195046,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9624352260519504,
"avg_score": 0,
"num_lines": 48
} |
from __future__ import absolute_import, division, print_function
import json, ijson
from itertools import chain
def _build_value(data):
''' Build a value (number, array, whatever) from an ijson stream.
'''
for (prefix, event, value) in data:
if event in ('string', 'null', 'boolean'):
return value
elif event == 'number':
return int(value) if (int(value) == float(value)) else float(value)
elif event == 'start_array':
return _build_list(data)
elif event == 'start_map':
return _build_map(data)
else:
# MOOP.
raise ValueError((prefix, event, value))
def _build_list(data):
''' Build a list from an ijson stream.
Stop when 'end_array' is reached.
'''
output = list()
for (prefix, event, value) in data:
if event == 'end_array':
break
else:
# let _build_value() handle the array item.
_data = chain([(prefix, event, value)], data)
output.append(_build_value(_data))
return output
def _build_map(data):
''' Build a dictionary from an ijson stream.
Stop when 'end_map' is reached.
'''
output = dict()
for (prefix, event, value) in data:
if event == 'end_map':
break
elif event == 'map_key':
output[value] = _build_value(data)
else:
# MOOP.
raise ValueError((prefix, event, value))
return output
def sample_geojson(stream, max_features):
''' Read a stream of input GeoJSON and return a string with a limited feature count.
'''
features = list()
for feature in stream_geojson(stream):
if len(features) == max_features:
break
features.append(feature)
geojson = dict(type='FeatureCollection', features=features)
return json.dumps(geojson)
def stream_geojson(stream):
    ''' Yield GeoJSON features one at a time from a file-like byte stream.
    '''
data = ijson.parse(stream)
for (prefix1, event1, value1) in data:
if event1 != 'start_map':
# A root GeoJSON object is a map.
raise ValueError((prefix1, event1, value1))
for (prefix2, event2, value2) in data:
if event2 == 'map_key' and value2 == 'type':
prefix3, event3, value3 = next(data)
                if event3 != 'string' or value3 != 'FeatureCollection':
# We only want GeoJSON feature collections
raise ValueError((prefix3, event3, value3))
elif event2 == 'map_key' and value2 == 'features':
prefix4, event4, value4 = next(data)
if event4 != 'start_array':
# We only want lists of features here.
raise ValueError((prefix4, event4, value4))
for (prefix5, event5, value5) in data:
if event5 == 'end_array':
break
# let _build_value() handle the feature.
_data = chain([(prefix5, event5, value5)], data)
feature = _build_value(_data)
yield feature
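# Hedged usage sketch (added for illustration; not part of the original
# module): feed a small in-memory GeoJSON document through ``sample_geojson``
# to keep only the first feature.  ijson reads a bytes stream here; the
# two-feature payload below is made up.
def _demo_sample():
    from io import BytesIO
    doc = (b'{"type": "FeatureCollection", "features": ['
           b'{"type": "Feature", "properties": {"id": 1}, "geometry": null},'
           b'{"type": "Feature", "properties": {"id": 2}, "geometry": null}]}')
    # Returns a JSON string whose "features" list has exactly one entry.
    return sample_geojson(BytesIO(doc), max_features=1)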
| {
"repo_name": "openaddresses/machine",
"path": "openaddr/sample.py",
"copies": "1",
"size": "3167",
"license": "isc",
"hash": -5912728485392540000,
"line_mean": 27.7909090909,
"line_max": 88,
"alpha_frac": 0.5402589201,
"autogenerated": false,
"ratio": 4.167105263157895,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0023090074228921367,
"num_lines": 110
} |
from __future__ import absolute_import, division, print_function
import json
from abc import ABCMeta, abstractmethod
from functools import partial
from tempfile import TemporaryFile
import six
from represent import autorepr
from .compat.contextlib import suppress
__all__ = (
'DefaultKeyMaker',
'StreamingDefaultKeyMaker',
)
@six.add_metaclass(ABCMeta)
class KeyMaker(object):
"""KeyMaker abstract base class."""
@abstractmethod
def make_key(self, obj):
"""Make key from passed object.
Parameters:
obj: Any Python object.
Yields:
bytes of key to represent object.
"""
raise NotImplementedError
@autorepr
class DefaultKeyMaker(KeyMaker):
"""Default KeyMaker that is consistent across Python versions.
Uses :py:class:`_AnyObjectJSONEncoder` to convert any object into a string
representation.
Parameters:
sort_keys (bool): Sort dictionary keys for consistency across Python
versions with different hash algorithms.
"""
def __init__(self, sort_keys=True):
self.sort_keys = sort_keys
def make_key(self, obj):
keystr = json.dumps(
obj, sort_keys=self.sort_keys, cls=_AnyObjectJSONEncoder)
yield keystr.encode('utf-8')
class StreamingDefaultKeyMaker(DefaultKeyMaker):
"""Subclass of DefaultKeyMaker that uses a temporary file to save memory."""
def make_key(self, obj):
with TemporaryFile(mode='w+') as f:
json.dump(
obj, f, sort_keys=self.sort_keys, cls=_AnyObjectJSONEncoder)
f.seek(0)
for data in iter(partial(f.read, 65536), ''):
yield data.encode('utf-8')
class _AnyObjectJSONEncoder(json.JSONEncoder):
"""Serialize objects that can't normally be serialized by json.
Attempts to get state will be done in this order:
- ``o.__getstate__()``
- Parameters from ``o.__slots__``
- ``o.__dict__``
- ``repr(o)``
"""
def default(self, o):
with suppress(TypeError):
return json.JSONEncoder.default(self, o)
with suppress(AttributeError):
return o.__getstate__()
if hasattr(o, '__slots__'):
all_slots = set()
for cls in o.__class__.__mro__:
slots = getattr(cls, '__slots__', tuple())
slots = normalise_slots(slots)
all_slots.update(slots)
return {k: getattr(o, k) for k in all_slots if hasattr(o, k)}
with suppress(AttributeError):
return o.__dict__
return repr(o)
def normalise_slots(obj):
"""__slots__ can be a string for single attribute. Return inside tuple."""
if isinstance(obj, six.string_types):
return (obj,)
else:
return obj
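# Hedged usage sketch (added for illustration; not part of the original
# module): with ``sort_keys=True`` the generated cache key is byte-for-byte
# stable across runs and Python versions, which is the point of
# ``DefaultKeyMaker``.
def _demo_key():
    maker = DefaultKeyMaker()
    # -> b'{"a": 2, "b": 1}'
    return b''.join(maker.make_key({'b': 1, 'a': 2}))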
| {
"repo_name": "RazerM/bucketcache",
"path": "bucketcache/keymakers.py",
"copies": "1",
"size": "2836",
"license": "mit",
"hash": -2068663004951174100,
"line_mean": 26.5339805825,
"line_max": 80,
"alpha_frac": 0.6096614951,
"autogenerated": false,
"ratio": 4.283987915407855,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5393649410507855,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import json
from base64 import b64decode
from flask import Blueprint, current_app, jsonify, request
import appr.api.impl.registry
from appr.api.app import getvalues, repo_name
from appr.exception import (
ApprException, ChannelNotFound, InvalidParams, InvalidRelease, InvalidUsage,
PackageAlreadyExists, PackageNotFound, PackageReleaseNotFound, UnableToLockResource,
UnauthorizedAccess, Unsupported)
from appr.models import DEFAULT_MEDIA_TYPE, Blob, Channel, Package
registry_app = Blueprint(
'registry',
__name__, )
@registry_app.errorhandler(Unsupported)
@registry_app.errorhandler(PackageAlreadyExists)
@registry_app.errorhandler(InvalidRelease)
@registry_app.errorhandler(UnableToLockResource)
@registry_app.errorhandler(UnauthorizedAccess)
@registry_app.errorhandler(PackageNotFound)
@registry_app.errorhandler(PackageReleaseNotFound)
@registry_app.errorhandler(ApprException)
@registry_app.errorhandler(InvalidUsage)
@registry_app.errorhandler(InvalidParams)
@registry_app.errorhandler(ChannelNotFound)
def render_error(error):
response = jsonify(error.to_dict())
response.status_code = error.status_code
return response
@registry_app.before_request
def pre_request_logging():
jsonbody = request.get_json(force=True, silent=True)
values = request.values.to_dict()
if jsonbody:
values.update(jsonbody)
current_app.logger.info("request", extra={
"remote_addr": request.remote_addr,
"http_method": request.method,
"original_url": request.url,
"path": request.path,
"data": values,
"headers": dict(request.headers.to_list())})
@registry_app.route("/test_error")
def test_error():
raise InvalidUsage("error message", {"path": request.path})
def _pull(data, json_format=True):
if json_format:
resp = jsonify(data)
else:
resp = current_app.make_response(b64decode(data['blob']))
resp.headers['Content-Disposition'] = data['filename']
resp.mimetype = 'application/x-gzip'
return resp
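# Hedged client-side sketch (added for illustration; not part of this
# module): the two response shapes ``_pull`` produces, as a client would
# request them.  The host, namespace and package names are made up.
def _demo_pull(base='http://localhost:5000'):
    import requests
    raw = requests.get(base + '/api/v1/packages/ns/pkg/1.0.0/helm/pull')
    meta = requests.get(base + '/api/v1/packages/ns/pkg/1.0.0/helm/pull',
                        params={'format': 'json'})
    return raw.content, meta.json()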
@registry_app.route(
"/api/v1/packages/<string:namespace>/<string:package_name>/blobs/sha256/<string:digest>",
methods=['GET'], strict_slashes=False)
def blobs(namespace, package_name, digest):
reponame = repo_name(namespace, package_name)
data = appr.api.impl.registry.pull_blob(reponame, digest, blob_class=Blob)
json_format = request.args.get('format', None) == 'json'
return _pull(data, json_format=json_format)
@registry_app.route(
"/api/v1/packages/<string:namespace>/<string:package_name>/blobs/sha256/<string:digest>/json",
methods=['GET'], strict_slashes=False)
def blobs_json(namespace, package_name, digest):
reponame = repo_name(namespace, package_name)
data = appr.api.impl.registry.pull_blob(reponame, digest, blob_class=Blob)
return _pull(data, json_format=True)
@registry_app.route(
"/api/v1/packages/<string:namespace>/<string:package_name>/<string:release>/<string:media_type>/pull",
methods=['GET'], strict_slashes=False)
def pull(namespace, package_name, release, media_type):
reponame = repo_name(namespace, package_name)
data = appr.api.impl.registry.pull(reponame, release, media_type, Package, blob_class=Blob)
json_format = request.args.get('format', None) == 'json'
return _pull(data, json_format=json_format)
@registry_app.route(
"/api/v1/packages/<string:namespace>/<string:package_name>/<string:release>/<string:media_type>/pull/json",
methods=['GET'], strict_slashes=False)
def pull_json(namespace, package_name, release, media_type):
reponame = repo_name(namespace, package_name)
data = appr.api.impl.registry.pull(reponame, release, media_type, Package, blob_class=Blob)
return _pull(data, json_format=True)
@registry_app.route("/api/v1/packages/<string:namespace>/<string:package_name>", methods=['POST'],
strict_slashes=False)
def push(namespace, package_name):
reponame = repo_name(namespace, package_name)
values = getvalues()
release = values['release']
media_type = values.get('media_type', DEFAULT_MEDIA_TYPE)
force = (values.get('force', 'false') == 'true')
metadata = values.get('metadata', None)
blob = Blob(reponame, values['blob'])
result = appr.api.impl.registry.push(reponame, release, media_type, blob, force, Package,
metadata=metadata)
return jsonify(result)
@registry_app.route(
"/api/v1/packages/<string:namespace>/<string:package_name>/<string:release>/<string:media_type>",
methods=['DELETE'], strict_slashes=False)
def delete_package(namespace, package_name, release, media_type):
reponame = repo_name(namespace, package_name)
result = appr.api.impl.registry.delete_package(reponame, release, media_type,
package_class=Package)
return jsonify(result)
@registry_app.route("/api/v1/packages", methods=['GET'], strict_slashes=False)
def list_packages():
values = getvalues()
namespace = values.get('namespace', None)
result = appr.api.impl.registry.list_packages(namespace, Package, search=values.get(
'query', None), media_type=values.get('media_type', None))
resp = current_app.make_response(json.dumps(result))
resp.mimetype = 'application/json'
return resp
@registry_app.route("/api/v1/packages/search", methods=['GET'], strict_slashes=False)
def search_packages():
values = getvalues()
query = values.get("q")
result = appr.api.impl.registry.search(query, Package)
return jsonify(result)
@registry_app.route(
"/api/v1/packages/<string:namespace>/<string:package_name>/<string:release>/<string:media_type>",
methods=['GET'], strict_slashes=False)
def show_package(namespace, package_name, release, media_type):
reponame = repo_name(namespace, package_name)
result = appr.api.impl.registry.show_package(reponame, release, media_type,
channel_class=Channel, package_class=Package)
return jsonify(result)
@registry_app.route("/api/v1/packages/<string:namespace>/<string:package_name>", methods=['GET'],
strict_slashes=False)
def show_package_releases(namespace, package_name):
reponame = repo_name(namespace, package_name)
media_type = getvalues().get('media_type', None)
result = appr.api.impl.registry.show_package_releases(reponame, media_type=media_type,
package_class=Package)
return jsonify(result)
@registry_app.route("/api/v1/packages/<string:namespace>/<string:package_name>/<string:release>",
methods=['GET'], strict_slashes=False)
def show_package_release_manifests(namespace, package_name, release):
reponame = repo_name(namespace, package_name)
result = appr.api.impl.registry.show_package_manifests(reponame, release,
package_class=Package)
return jsonify(result)
# CHANNELS
@registry_app.route("/api/v1/packages/<string:namespace>/<string:package_name>/channels", methods=[
'GET'], strict_slashes=False)
def list_channels(namespace, package_name):
reponame = repo_name(namespace, package_name)
result = appr.api.impl.registry.list_channels(reponame, Channel)
resp = current_app.make_response(json.dumps(result))
resp.mimetype = 'application/json'
return resp
@registry_app.route(
"/api/v1/packages/<string:namespace>/<string:package_name>/channels/<string:channel_name>",
methods=['GET'], strict_slashes=False)
def show_channel(namespace, package_name, channel_name):
reponame = repo_name(namespace, package_name)
result = appr.api.impl.registry.show_channel(reponame, channel_name, Channel)
return jsonify(result)
@registry_app.route(
"/api/v1/packages/<string:namespace>/<string:package_name>/channels/<string:channel_name>/<string:release>",
methods=['POST'], strict_slashes=False)
def add_channel_release(namespace, package_name, channel_name, release):
reponame = repo_name(namespace, package_name)
result = appr.api.impl.registry.add_channel_release(
reponame, channel_name, release, channel_class=Channel, package_class=Package)
return jsonify(result)
@registry_app.route(
"/api/v1/packages/<string:namespace>/<string:package_name>/channels/<string:channel_name>/<string:release>",
methods=['DELETE'], strict_slashes=False)
def delete_channel_release(namespace, package_name, channel_name, release):
reponame = repo_name(namespace, package_name)
result = appr.api.impl.registry.delete_channel_release(
reponame, channel_name, release, channel_class=Channel, package_class=Package)
return jsonify(result)
@registry_app.route(
"/api/v1/packages/<string:namespace>/<string:package_name>/channels/<string:channel_name>",
methods=['DELETE'], strict_slashes=False)
def delete_channel(namespace, package_name, channel_name):
reponame = repo_name(namespace, package_name)
result = appr.api.impl.registry.delete_channel(reponame, channel_name, channel_class=Channel)
return jsonify(result)
| {
"repo_name": "app-registry/appr",
"path": "appr/api/registry.py",
"copies": "2",
"size": "9277",
"license": "apache-2.0",
"hash": 3209186167064203000,
"line_mean": 40.0486725664,
"line_max": 112,
"alpha_frac": 0.7001185728,
"autogenerated": false,
"ratio": 3.592951200619675,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5293069773419675,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import json
from base64 import b64encode
import pytest
import requests
import requests_mock
import appr
from appr.client import DEFAULT_PREFIX, DEFAULT_REGISTRY, ApprClient
@pytest.fixture()
def channels_data():
return {'dev': {'current': '1.0.0-rc', 'name': 'dev'}}
@pytest.fixture(autouse=True)
def fakehome(fake_home):
pass
def test_headers_without_auth():
r = ApprClient()
assert sorted(r.headers.keys()) == ['Content-Type', 'User-Agent']
assert r.headers["Content-Type"] == "application/json"
assert r.headers["User-Agent"] == "apprpy-cli/%s" % appr.__version__
def test_headers_with_auth():
r = ApprClient()
r.auth.add_token('http://localhost:5000/cnr', 'titi')
assert sorted(r.headers.keys()) == ["Authorization", 'Content-Type', 'User-Agent']
assert r.headers["Authorization"] == "titi"
assert r.headers["Content-Type"] == "application/json"
assert r.headers["User-Agent"] == "apprpy-cli/%s" % appr.__version__
def test_headers_with_auth_star():
r = ApprClient()
r.auth.add_token('*', 'titi')
assert sorted(r.headers.keys()) == ["Authorization", 'Content-Type', 'User-Agent']
assert r.headers["Authorization"] == "titi"
assert r.headers["Content-Type"] == "application/json"
assert r.headers["User-Agent"] == "apprpy-cli/%s" % appr.__version__
def test_default_endpoint():
r = ApprClient(endpoint=None)
assert r.endpoint.geturl() == DEFAULT_REGISTRY + DEFAULT_PREFIX
def test_url():
r = ApprClient(endpoint="http://test.com")
assert r._url("/test") == "http://test.com" + DEFAULT_PREFIX + "/test"
def test_url_prefix():
r = ApprClient(endpoint="http://test.com/test")
assert r._url("/2") == "http://test.com/test" + DEFAULT_PREFIX + "/2"
def test_pull():
r = ApprClient()
with requests_mock.mock() as m:
response = b'package_data'
m.get(DEFAULT_REGISTRY + DEFAULT_PREFIX + "/api/v1/packages/orga/p1/1.0.0/helm/pull", content=response)
assert r.pull("orga/p1", {"value": "1.0.0", "key": "version"}, "helm") == response
def test_pull_channel(channels_data):
r = ApprClient()
with requests_mock.mock() as m:
response = b'package_data'
m.get(DEFAULT_REGISTRY + DEFAULT_PREFIX + "/api/v1/packages/orga/p1/1.0.0-rc/helm/pull", content=response)
m.get(DEFAULT_REGISTRY + DEFAULT_PREFIX + "/api/v1/packages/orga/p1/channels/dev",
text=json.dumps(channels_data['dev']))
assert r.pull("orga/p1", {"value": "dev", "key": "channel"}, "helm") == response
def test_pull_digest():
r = ApprClient()
with requests_mock.mock() as m:
response = b'package_data'
m.get(DEFAULT_REGISTRY + DEFAULT_PREFIX + "/api/v1/packages/orga/p1/blobs/sha256/2432", content=response)
assert r.pull("orga/p1", {"key": "digest", "value": "2432"}, "helm") == response
def test_pull_version():
r = ApprClient()
with requests_mock.mock() as m:
response = b'package_data'
m.get(DEFAULT_REGISTRY + DEFAULT_PREFIX + "/api/v1/packages/orga/p1/0.8.1/helm/pull", content=response)
assert r.pull("orga/p1", {"key": "version", "value": "0.8.1"}, "helm") == response
def test_pull_discovery_https(discovery_html):
r = ApprClient()
with requests_mock.mock() as m:
response = b'package_data'
m.get("https://appr.sh/?appr-discovery=1", text=discovery_html, complete_qs=True)
m.get("https://api.kubespray.io/api/v1/packages/orga/p1/pull", content=response)
assert r.pull("appr.sh/orga/p1", {"key": "version", "value": "1.0.0"}, "helm") == response
def test_pull_discovery_http(discovery_html):
r = ApprClient()
with requests_mock.mock() as m:
response = b'package_data'
m.get("https://appr.sh/?appr-discovery=1", text="<html/>", complete_qs=True)
m.get("http://appr.sh/?appr-discovery=1", text=discovery_html, complete_qs=True)
m.get("https://api.kubespray.io/api/v1/packages/orga/p1/pull", content=response)
assert r.pull("appr.sh/orga/p1", {"key": "version", "value": "1.0.0"}, "helm") == response
def test_pull_with_version():
r = ApprClient()
with requests_mock.mock() as m:
response = b'package_data'
m.get(DEFAULT_REGISTRY + DEFAULT_PREFIX + "/api/v1/packages/orga/p1/1.0.1/helm/pull", complete_qs=True, content=response)
assert r.pull("orga/p1", {"key": "version", "value": "1.0.1"}, "helm") == response
def test_list_packages():
r = ApprClient()
with requests_mock.mock() as m:
response = '{"packages": "true"}'
m.get(DEFAULT_REGISTRY + DEFAULT_PREFIX + "/api/v1/packages", text=response)
assert json.dumps(r.list_packages({})) == response
def test_list_packages_username():
r = ApprClient()
with requests_mock.mock() as m:
response = '{"packages": "true"}'
m.get(DEFAULT_REGISTRY + DEFAULT_PREFIX + "/api/v1/packages?username=ant31", complete_qs=True, text=response)
assert json.dumps(r.list_packages({'username': "ant31"})) == response
def test_list_packages_orga():
r = ApprClient()
with requests_mock.mock() as m:
response = '{"packages": "true"}'
m.get(DEFAULT_REGISTRY + DEFAULT_PREFIX + "/api/v1/packages?namespace=ant31", complete_qs=True, text=response)
assert json.dumps(r.list_packages({'namespace': "ant31"})) == response
def test_list_packages_orga_and_user():
r = ApprClient()
with requests_mock.mock() as m:
response = '{"packages": "true"}'
m.get(DEFAULT_REGISTRY + DEFAULT_PREFIX + "/api/v1/packages?username=titi&namespace=ant31", complete_qs=True, text=response)
assert json.dumps(r.list_packages({"username": "titi", "namespace": "ant31"})) == response
def test_delete_package():
r = ApprClient()
with requests_mock.mock() as m:
response = '{"packages": "true"}'
m.delete(DEFAULT_REGISTRY + DEFAULT_PREFIX + "/api/v1/packages/ant31/kube-ui/1.4.3/helm", complete_qs=True, text=response)
assert r.delete_package("ant31/kube-ui", "1.4.3", "helm") == {"packages": "true"}
def test_delete_package_version():
r = ApprClient()
with requests_mock.mock() as m:
response = '{"packages": "true"}'
m.delete(DEFAULT_REGISTRY + DEFAULT_PREFIX + "/api/v1/packages/ant31/kube-ui/1.4.3/helm", complete_qs=True, text=response)
assert r.delete_package(name="ant31/kube-ui", version="1.4.3", media_type="helm") == {"packages": "true"}
def test_delete_package_unauthorized():
r = ApprClient()
with requests_mock.mock() as m:
response = '{"packages": "true"}'
m.delete(DEFAULT_REGISTRY + DEFAULT_PREFIX + "/api/v1/packages/ant31/kube-ui/1.4.3/helm",
complete_qs=True,
text=response,
status_code=401)
with pytest.raises(requests.HTTPError):
r.delete_package("ant31/kube-ui", "1.4.3", "helm")
def test_push_unauthorized():
r = ApprClient()
with requests_mock.mock() as m:
body = {"blob": "fdsfds"}
response = b'{"packages": "true"}'
m.post(DEFAULT_REGISTRY + DEFAULT_PREFIX + "/api/v1/packages/ant31/kube-ui?force=false",
complete_qs=True,
content=response,
status_code=401)
with pytest.raises(requests.HTTPError):
r.push(name="ant31/kube-ui", body=body)
def test_push():
body = {"blob": b64encode(b"testdata").decode('utf-8')}
r = ApprClient()
response = '{"packages": "true"}'
with requests_mock.mock() as m:
m.post(DEFAULT_REGISTRY + DEFAULT_PREFIX + "/api/v1/packages/ant31/kube-ui?force=false",
complete_qs=True,
text=response)
assert json.dumps(r.push(name="ant31/kube-ui", body=body)) == json.dumps(json.loads(response))
def test_push_force():
body = {"blob": b64encode(b"foobar").decode('utf-8')}
r = ApprClient()
response = '{"packages": "true"}'
with requests_mock.mock() as m:
m.post(DEFAULT_REGISTRY + DEFAULT_PREFIX + "/api/v1/packages/ant31/kube-ui?force=true",
complete_qs=True,
text=response)
assert json.dumps(r.push(name="ant31/kube-ui", body=body, force=True)) == json.dumps(json.loads(response))
def test_get_version():
r = ApprClient()
response = '{"appr-server": "0.23.0"}'
with requests_mock.mock() as m:
m.get(DEFAULT_REGISTRY + DEFAULT_PREFIX + "/version",
complete_qs=True,
text=response)
assert json.dumps(r.version()) == json.dumps(json.loads(response))
| {
"repo_name": "cn-app-registry/cnr-server",
"path": "tests/test_registry.py",
"copies": "2",
"size": "8690",
"license": "apache-2.0",
"hash": -3619377675580104000,
"line_mean": 37.281938326,
"line_max": 132,
"alpha_frac": 0.6230149597,
"autogenerated": false,
"ratio": 3.139450867052023,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9754750506116219,
"avg_score": 0.0015430641271611454,
"num_lines": 227
} |
from __future__ import absolute_import, division, print_function
import json
from blaze.catalog.blaze_url import split_array_base
import dynd
from dynd import nd, ndt
from dynd.nd import as_numpy
from blaze import array
class compute_session:
    def __init__(self, array_provider, base_url, array_name):
        session_name, root_dir = array_provider.create_session_dir()
        self.array_provider = array_provider
        self.session_name = session_name
        self.root_dir = root_dir
self.array_name = array_name
self.base_url = base_url
    def get_session_array(self, array_name=None):
if array_name is None:
array_name = self.array_name
array_root, indexers = split_array_base(array_name)
arr = self.array_provider(array_root)
if arr is None:
raise Exception('No Blaze Array named ' + array_root)
for i in indexers:
if type(i) in [slice, int, tuple]:
arr = arr[i]
else:
arr = getattr(arr, i)
return arr
def creation_response(self):
content_type = 'application/json; charset=utf-8'
body = json.dumps({
'session' : self.base_url + self.session_name,
'version' : 'prototype',
'dynd_python_version': dynd.__version__,
'dynd_version' : dynd.__libdynd_version__,
'access' : 'no permission model yet'
})
return (content_type, body)
def close(self):
print('Deleting files for session %s' % self.session_name)
self.array_provider.delete_session_dir(self.session_name)
content_type = 'application/json; charset=utf-8'
body = json.dumps({
'session': self.base_url + self.session_name,
'action': 'closed'
})
return (content_type, body)
def sort(self, json_cmd):
import numpy as np
print ('sorting')
cmd = json.loads(json_cmd)
array_url = cmd.get('input', self.base_url + self.array_name)
if not array_url.startswith(self.base_url):
raise RuntimeError('Input array must start with the base url')
array_name = array_url[len(self.base_url):]
field = cmd['field']
arr = self.get_session_array(array_name)
nparr = as_numpy(arr)
idxs = np.argsort(nparr[field])
res = nd.ndobject(nparr[idxs])
defarr = self.array_provider.create_deferred_array_filename(
self.session_name, 'sort_', res)
dshape = nd.dshape_of(res)
defarr[0].write(json.dumps({
'dshape': dshape,
'command': 'sort',
'params': {
'field': field,
}
}))
defarr[0].close()
content_type = 'application/json; charset=utf-8'
body = json.dumps({
'session': self.base_url + self.session_name,
'output': self.base_url + defarr[1],
'dshape': dshape
})
return (content_type, body)
def groupby(self, json_cmd):
print('GroupBy operation')
cmd = json.loads(json_cmd)
array_url = cmd.get('input', self.base_url + self.array_name)
if not array_url.startswith(self.base_url):
raise RuntimeError('Input array must start with the base url')
array_name = array_url[len(self.base_url):]
fields = cmd['fields']
arr = self.get_session_array(array_name)[...]._data.dynd_arr()
# Do the groupby, get its groups, then
# evaluate it because deferred operations
# through the groupby won't work well yet.
res = nd.groupby(arr, nd.fields(arr, *fields))
groups = res.groups
res = res.eval()
# Write out the groupby result
defarr_gb = self.array_provider.create_deferred_array_filename(
self.session_name, 'groupby_', array(res))
dshape_gb = nd.dshape_of(res)
defarr_gb[0].write(json.dumps({
'dshape': dshape_gb,
'command': 'groupby',
'params': {
'fields': fields
}
}))
defarr_gb[0].close()
# Write out the groups
defarr_groups = self.array_provider.create_deferred_array_filename(
self.session_name, 'groups_', groups)
dshape_groups = nd.dshape_of(groups)
defarr_groups[0].write(json.dumps({
'dshape': dshape_groups,
'command': 'groupby.groups',
'params': {
'fields': fields
}
}))
defarr_groups[0].close()
content_type = 'application/json; charset=utf-8'
body = json.dumps({
'session': self.base_url + self.session_name,
'output_gb': self.base_url + defarr_gb[1],
'dshape_gb': dshape_gb,
'output_groups': self.base_url + defarr_groups[1],
'dshape_groups': dshape_groups
})
return (content_type, body)
def add_computed_fields(self, json_cmd):
print('Adding computed fields')
cmd = json.loads(json_cmd)
array_url = cmd.get('input', self.base_url + self.array_name)
if not array_url.startswith(self.base_url):
raise RuntimeError('Input array must start with the base url')
array_name = array_url[len(self.base_url):]
fields = cmd['fields']
rm_fields = cmd.get('rm_fields', [])
fnname = cmd.get('fnname', None)
arr = self.get_session_array(array_name)._data.dynd_arr()
res = nd.add_computed_fields(arr, fields, rm_fields, fnname)
defarr = self.array_provider.create_deferred_array_filename(
self.session_name, 'computed_fields_', array(res))
dshape = nd.dshape_of(res)
defarr[0].write(json.dumps({
'dshape': dshape,
'command': 'add_computed_fields',
'params': {
'fields': fields,
'rm_fields': rm_fields,
'fnname': fnname
}
}))
defarr[0].close()
content_type = 'application/json; charset=utf-8'
body = json.dumps({
'session': self.base_url + self.session_name,
'output': self.base_url + defarr[1],
'dshape': dshape
})
return (content_type, body)
def make_computed_fields(self, json_cmd):
print('Adding computed fields')
cmd = json.loads(json_cmd)
array_url = cmd.get('input', self.base_url + self.array_name)
if not array_url.startswith(self.base_url):
raise RuntimeError('Input array must start with the base url')
array_name = array_url[len(self.base_url):]
fields = cmd['fields']
replace_undim = cmd.get('replace_undim', 0)
fnname = cmd.get('fnname', None)
arr = self.get_session_array(array_name)._data.dynd_arr()
res = nd.make_computed_fields(arr, replace_undim, fields, fnname)
defarr = self.array_provider.create_deferred_array_filename(
self.session_name, 'computed_fields_', array(res))
dshape = nd.dshape_of(res)
defarr[0].write(json.dumps({
'dshape': dshape,
'command': 'make_computed_fields',
'params': {
'fields': fields,
'replace_undim': replace_undim,
'fnname': fnname
}
}))
defarr[0].close()
content_type = 'application/json; charset=utf-8'
body = json.dumps({
'session': self.base_url + self.session_name,
'output': self.base_url + defarr[1],
'dshape': dshape
})
return (content_type, body)
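# Usage sketch (illustrative only; the provider, base URL and array name below
# are hypothetical): a session is bound to an array provider and a named array,
# and each operation (sort, groupby, add/make_computed_fields) writes a
# deferred-array file and returns a JSON body pointing at the new array.
#
#   session = compute_session(provider, 'http://localhost:8080/', 'kiva')
#   ctype, body = session.creation_response()
#   ctype, body = session.sort(json.dumps({'field': 'loan_amount'}))
#   ctype, body = session.close()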
| {
"repo_name": "markflorisson/blaze-core",
"path": "blaze/io/server/compute_session.py",
"copies": "7",
"size": "7979",
"license": "bsd-3-clause",
"hash": 3726957083972581000,
"line_mean": 37.3605769231,
"line_max": 75,
"alpha_frac": 0.5354054393,
"autogenerated": false,
"ratio": 3.8508687258687258,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.7886274165168725,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import json
from cStringIO import StringIO
import urllib
import bottle
from dossier.fc import FeatureCollection
import dossier.web.routes as routes
from dossier.web.tests import config_local, kvl, store, label_store # noqa
def rot14(s):
# Use `rot14` so that `rot14(rot14(s)) != s`.
return ''.join(chr(ord('a') + ((ord(c) - ord('a')) + 14) % 26) for c in s)
def dbid_to_visid(s):
return rot14(s)
def visid_to_dbid(s):
return rot14(s)
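# Quick doctest-style illustration of the note above (the sample string is
# arbitrary): a 14-letter rotation applied twice is rot28 == rot2, so the
# round-trip deliberately does not return the original id.
#
#   >>> rot14('abc')
#   'opq'
#   >>> rot14(rot14('abc'))
#   'cde'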
def new_request(params=None, body=None):
environ = {'wsgi.input': StringIO('')}
if params is not None:
environ['QUERY_STRING'] = urllib.urlencode(params)
if body is not None:
environ['wsgi.input'] = StringIO(body)
environ['CONTENT_LENGTH'] = len(body)
return bottle.Request(environ=environ)
def new_response():
return bottle.Response()
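# Usage sketch (illustrative only; the query values are hypothetical):
# new_request builds a bottle.Request straight from a WSGI environ, so route
# functions can be exercised without running a server.
#
#   req = new_request(params={'content_id': 'abc'})
#   req.query['content_id']   # -> 'abc'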
def test_fc_put(store): # noqa
req = new_request(body=json.dumps({'foo': {'a': 1}}))
resp = new_response()
routes.v1_fc_put(req, resp, visid_to_dbid, store, 'abc')
assert store.get(visid_to_dbid('abc'))['foo']['a'] == 1
def test_fc_get(store): # noqa
store.put([(visid_to_dbid('abc'), FeatureCollection({'foo': {'a': 1}}))])
fc = routes.v1_fc_get(dbid_to_visid, store, 'abc')
assert fc['foo']['a'] == 1
| {
"repo_name": "dossier/dossier.web",
"path": "dossier/web/tests/test_routes.py",
"copies": "1",
"size": "1347",
"license": "mit",
"hash": -4067338607863798300,
"line_mean": 24.9038461538,
"line_max": 78,
"alpha_frac": 0.6384558278,
"autogenerated": false,
"ratio": 2.9866962305986697,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.912515205839867,
"avg_score": 0,
"num_lines": 52
} |
from __future__ import absolute_import, division, print_function
import json
from csv import DictReader
from io import StringIO
from base64 import b64decode
from operator import itemgetter
from os.path import join, dirname, splitext, relpath
from dateutil.parser import parse as parse_datetime
from urllib.parse import urljoin
from os import environ
from re import compile
import json, pickle
import requests
from uritemplate import expand as expand_uri
from . import S3, __version__
# Sort constants for summarize_runs()
GLASS_HALF_FULL = 1
GLASS_HALF_EMPTY = 2
def _get_cached(memcache, key):
''' Get a thing from the cache, or None.
'''
if not memcache:
return None
pickled = memcache.get(key)
if pickled is None:
return None
try:
value = pickle.loads(pickled)
except Exception as e:
return None
else:
return value
def _set_cached(memcache, key, value):
''' Put a thing in the cache, if it exists.
'''
if not memcache:
return
pickled = pickle.dumps(value, protocol=2)
memcache.set(key, pickled)
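# Usage sketch (illustrative only; assumes `memcache` is a client exposing
# get/set of byte strings, e.g. python-memcached): values round-trip through
# pickle protocol 2, and a missing or unpicklable entry comes back as None.
#
#   _set_cached(memcache, 'converted-run-42', {'processed': True})
#   _get_cached(memcache, 'converted-run-42')   # -> {'processed': True} or None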
def is_coverage_complete(source):
    ''' Return True if the source declares ISO 3166, US Census or geometry coverage.
    '''
if 'coverage' in source:
cov = source['coverage']
if ('ISO 3166' in cov or 'US Census' in cov or 'geometry' in cov):
return True
return False
def state_conform_type(state):
    ''' Guess a conform type from the cached file extension and geometry type.
    '''
if 'cache' not in state.keys:
return None
if state.cache is None:
return None
if state.cache.endswith('.zip'):
if state.geometry_type in ('Polygon', 'MultiPolygon'):
return 'shapefile-polygon'
else:
return 'shapefile'
elif state.cache.endswith('.json'):
return 'geojson'
elif state.cache.endswith('.csv'):
return 'csv'
else:
return None
def convert_run(memcache, run, url_template):
    ''' Convert a run into a template-friendly dict, caching the result by run id and version.
    '''
cache_key = 'converted-run-{}-{}'.format(run.id, __version__)
cached_run = _get_cached(memcache, cache_key)
if cached_run is not None:
return cached_run
try:
source = json.loads(b64decode(run.source_data).decode('utf8'))
except:
source = {}
run_state = run.state or {}
converted_run = {
'address count': run_state.address_count,
'cache': run_state.cache,
'cache time': run_state.cache_time,
'cache_date': run.datetime_tz.strftime('%Y-%m-%d'),
'conform': bool(source.get('conform', False)),
'conform type': state_conform_type(run_state),
'coverage complete': is_coverage_complete(source),
'fingerprint': run_state.fingerprint,
'geometry type': run_state.geometry_type,
'href': expand_uri(url_template, run.__dict__),
'output': run_state.output,
'process time': run_state.process_time,
'processed': run_state.processed,
'sample': run_state.sample,
'run_id': run.id,
'shortname': splitext(relpath(run.source_path, 'sources'))[0],
'skip': bool(source.get('skip', False)),
'source': relpath(run.source_path, 'sources'),
'type': source.get('type', '').lower(),
'version': run_state.version,
'source problem': run_state.source_problem
}
_set_cached(memcache, cache_key, converted_run)
return converted_run
def run_counts(runs):
    ''' Return totals of sources, cached, processed and addresses for a list of runs.
    '''
states = [(run.state or {}) for run in runs]
return {
'sources': len(runs),
'cached': sum([int(bool(state.cache)) for state in states]),
'processed': sum([int(bool(state.processed)) for state in states]),
'addresses': sum([int(state.address_count or 0) for state in states])
}
def sort_run_dicts(dicts, sort_order):
    ''' Sort run dicts in place, happiest-first (GLASS_HALF_FULL) or neediest-first (GLASS_HALF_EMPTY).
    '''
if sort_order is GLASS_HALF_FULL:
# Put the happy, successful stuff up front.
key = lambda d: (not bool(d['processed']), not bool(d['cache']), d['source'])
elif sort_order is GLASS_HALF_EMPTY:
# Put the stuff that needs help up front.
key = lambda d: (bool(d['cache']), bool(d['processed']), d['source'])
else:
raise ValueError('Unknown sort order "{}"'.format(sort_order))
dicts.sort(key=key)
def nice_integer(number):
''' Format a number like '999,999,999'
'''
string = str(number)
pattern = compile(r'^(\d+)(\d\d\d)\b')
while pattern.match(string):
string = pattern.sub(r'\1,\2', string)
return string
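# Doctest-style illustration of the regex loop above:
#
#   >>> nice_integer(1234)
#   '1,234'
#   >>> nice_integer(999999999)
#   '999,999,999'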
def break_state(string):
''' Adds <wbr> tag and returns an HTML-safe string.
'''
pattern = compile(r'^(.+)/([^/]+)$')
string = string.replace('&', '&').replace('<', '<').replace('>', '>')
if pattern.match(string):
string = pattern.sub(r'\1/<wbr>\2', string)
return string
def summarize_runs(memcache, runs, datetime, owner, repository, sort_order):
''' Return summary data for set.html template.
'''
base_url = expand_uri(u'https://github.com/{owner}/{repository}/',
dict(owner=owner, repository=repository))
url_template = urljoin(base_url, u'blob/{commit_sha}/{+source_path}')
states = [convert_run(memcache, run, url_template) for run in runs]
counts = run_counts(runs)
sort_run_dicts(states, sort_order)
return dict(states=states, last_modified=datetime, counts=counts)
| {
"repo_name": "openaddresses/machine",
"path": "openaddr/summarize.py",
"copies": "1",
"size": "5331",
"license": "isc",
"hash": 5817250398828652000,
"line_mean": 27.8162162162,
"line_max": 85,
"alpha_frac": 0.6096417183,
"autogenerated": false,
"ratio": 3.689273356401384,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4798915074701384,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import json
from toolz.curried import map, take, pipe, pluck, get, concat, filter
from collections import Iterator, Iterable
import os
from contextlib import contextmanager
from datashape import discover, var, dshape, Record, DataShape
from datashape import coretypes as ct
from datashape.dispatch import dispatch
import gzip
import datetime
from ..append import append
from ..convert import convert, ooc_types
from ..resource import resource
from ..chunks import chunks
from ..utils import tuples_to_records
class JSON(object):
""" Proxy for a JSON file
Parameters
----------
path : str
Path to file on disk
See Also
--------
JSONLines - Line-delimited JSON
"""
def __init__(self, path):
self.path = path
class JSONLines(object):
""" Proxy for a line-delimited JSON file
Each line in the file is a valid JSON entry
Parameters
----------
path : str
Path to file on disk
See Also
--------
JSON - Not-line-delimited JSON
"""
def __init__(self, path):
self.path = path
def date_to_datetime_dshape(ds):
shape = ds.shape
if isinstance(ds.measure, Record):
measure = Record([[name, ct.datetime_ if typ == ct.date_ else typ]
for name, typ in ds.measure.parameters[0]])
else:
measure = ds.measure
return DataShape(*(shape + (measure,)))
@discover.register(JSON)
def discover_json(j, **kwargs):
data = json_load(j.path)
ds = discover(data)
return date_to_datetime_dshape(ds)
def nonempty(line):
return len(line.strip()) > 0
@discover.register(JSONLines)
def discover_jsonlines(j, n=10, encoding='utf-8', **kwargs):
with json_lines(j.path, encoding=encoding) as lines:
data = pipe(lines, filter(nonempty), map(json.loads), take(n), list)
if len(data) < n:
ds = discover(data)
else:
ds = var * discover(data).subshape[0]
return date_to_datetime_dshape(ds)
@convert.register(list, JSON)
def json_to_list(j, dshape=None, **kwargs):
return json_load(j.path, **kwargs)
@convert.register(Iterator, JSONLines)
def json_lines_to_iterator(j, encoding='utf-8', **kwargs):
with json_lines(j.path, encoding=encoding) as lines:
for item in pipe(lines, filter(nonempty), map(json.loads)):
yield item
@contextmanager
def json_lines(path, encoding='utf-8'):
""" Return lines of a json-lines file
Handles compression like gzip """
if path.split(os.path.extsep)[-1] == 'gz':
f = gzip.open(path)
lines = (line.decode(encoding) for line in f)
else:
f = open(path)
lines = f
try:
yield lines
finally:
f.close()
def json_load(path, encoding='utf-8', **kwargs):
""" Return data of a json file
Handles compression like gzip """
if path.split(os.path.extsep)[-1] == 'gz':
f = gzip.open(path)
s = f.read().decode(encoding)
else:
f = open(path)
s = f.read()
data = json.loads(s)
f.close()
return data
@append.register(JSONLines, object)
def object_to_jsonlines(j, o, **kwargs):
return append(j, convert(Iterator, o, **kwargs), **kwargs)
@append.register(JSONLines, Iterator)
def iterator_to_json_lines(j, seq, dshape=None, encoding='utf-8', **kwargs):
row = next(seq)
seq = concat([[row], seq])
if not isinstance(row, (dict, str)) and isinstance(row, Iterable):
seq = tuples_to_records(dshape, seq)
lines = (json.dumps(item, default=json_dumps) for item in seq)
# Open file
if j.path.split(os.path.extsep)[-1] == 'gz':
f = gzip.open(j.path, 'ab')
lines2 = (line.encode(encoding) for line in lines)
endl = b'\n'
else:
f = open(j.path, 'a')
lines2 = lines
endl = '\n'
for line in lines2:
f.write(line)
f.write(endl)
f.close()
return j
@append.register(JSON, list)
def list_to_json(j, seq, dshape=None, encoding='utf-8', **kwargs):
if not isinstance(seq[0], (dict, str)) and isinstance(seq[0], Iterable):
seq = list(tuples_to_records(dshape, seq))
if os.path.exists(j.path):
with open(j.path) as f:
if json.load(f):
raise ValueError("Can only append to empty JSON File.\n"
"Either remove contents from this file, save to a new file \n"
"or use line-delimited JSON format.\n"
"Consider using the jsonlines:// protocol, e.g.\n"
"\tinto('jsonlines://%s', your-data)" % j.path)
text = json.dumps(seq, default=json_dumps)
if j.path.split(os.path.extsep)[-1] == 'gz':
f = gzip.open(j.path, 'wb')
text = text.encode(encoding)
else:
f = open(j.path, 'w')
f.write(text)
f.close()
return j
@append.register(JSON, object)
def object_to_json(j, o, **kwargs):
return append(j, convert(list, o, **kwargs), **kwargs)
@resource.register('json://.*\.json(\.gz)?', priority=11)
def resource_json(path, **kwargs):
if 'json://' in path:
path = path[len('json://'):]
return JSON(path)
@resource.register('.*\.jsonlines(\.gz)?', priority=11)
@resource.register('jsonlines://.*\.json(\.gz)?', priority=11)
def resource_jsonlines(path, **kwargs):
if 'jsonlines://' in path:
path = path[len('jsonlines://'):]
return JSONLines(path)
@resource.register('.*\.json(\.gz)?')
def resource_json_ambiguous(path, **kwargs):
""" Try to guess if this file is line-delimited or not """
if os.path.exists(path):
f = open(path)
one = next(f)
try:
two = next(f)
except StopIteration: # only one line
f.close()
return resource_json(path, **kwargs)
try:
json.loads(one)
f.close()
return resource_jsonlines(path, **kwargs)
except:
f.close()
return resource_json(path, **kwargs)
# File doesn't exist, is the dshape variable length?
dshape = kwargs.get('expected_dshape', None)
if dshape and dshape[0] == var:
return resource_jsonlines(path, **kwargs)
else:
return resource_json(path, **kwargs)
@dispatch(datetime.datetime)
def json_dumps(dt):
s = dt.isoformat()
if not dt.tzname():
s = s + 'Z'
return s
@dispatch(datetime.date)
def json_dumps(dt):
return dt.isoformat()
@convert.register(chunks(list), chunks(JSON))
def convert_glob_of_jsons_into_chunks_of_lists(jsons, **kwargs):
def _():
return concat(convert(chunks(list), js, **kwargs) for js in jsons)
return chunks(list)(_)
@convert.register(chunks(Iterator), chunks(JSONLines))
def convert_glob_of_jsons_into_chunks_of_lists(jsons, **kwargs):
def _():
return concat(convert(chunks(Iterator), js, **kwargs) for js in jsons)
return chunks(Iterator)(_)
ooc_types.add(JSONLines)
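# Usage sketch (illustrative only; the file name is hypothetical): the
# resource() registrations above route ``jsonlines://`` or ``.jsonlines`` paths
# to the JSONLines proxy, append() writes one JSON object per line, and
# convert() streams them back as an iterator of dicts.
#
#   js = resource('jsonlines://accounts.json')
#   append(js, [{'name': 'Alice', 'amount': 100}])
#   list(convert(Iterator, js))   # -> [{'name': 'Alice', 'amount': 100}]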
| {
"repo_name": "mrocklin/into",
"path": "into/backends/json.py",
"copies": "1",
"size": "7011",
"license": "bsd-3-clause",
"hash": 6566293374856239000,
"line_mean": 24.4945454545,
"line_max": 78,
"alpha_frac": 0.6083297675,
"autogenerated": false,
"ratio": 3.4435166994106092,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4551846466910609,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import json
import logging
import os
import os.path
from collections import OrderedDict
import jsonpatch
import yaml
from appr.display import print_deploy_result
from appr.formats.appr.kub_base import KubBase
from appr.platforms.kubernetes import ANNOTATIONS, Kubernetes, get_endpoint
from appr.utils import colorize, mkdir_p
logger = logging.getLogger(__name__)
_mapping_tag = yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG
class Kub(KubBase):
media_type = 'appr'
platform = "kubernetes"
def _resource_name(self, resource):
return resource.get('name', resource['value']['metadata']['name'])
def _resource_build(self, kub, resource):
self._annotate_resource(kub, resource)
kind = resource['value']['kind'].lower()
return {
"file":
resource.get('file', "%s-%s.yaml" % (self._resource_name(resource), kind)),
"update_mode":
resource.get('update_mode', 'update'),
"hash":
resource['value']['metadata']['annotations'].get(ANNOTATIONS['hash'], None),
"protected":
resource['protected'],
"name":
self._resource_name(resource),
"kind":
kind,
"endpoint":
get_endpoint(resource['value']['kind'].lower()).format(namespace=self.namespace),
"body":
json.dumps(resource['value'])}
# @TODO do it in jsonnet
def _annotate_resource(self, kub, resource):
if 'annotations' not in resource['value']['metadata']:
resource['value']['metadata']['annotations'] = {}
if resource.get('hash', True):
# Hash is calculated later
resource['value']['metadata']['annotations'][ANNOTATIONS['hash']] = None
annotation = resource['value']['metadata']['annotations']
annotation[ANNOTATIONS['version']] = kub.version
annotation[ANNOTATIONS['package']] = kub.name
annotation[ANNOTATIONS['parent']] = self.name
annotation[ANNOTATIONS['update-mode']] = str(
resource.get('update_mode', annotation.get(ANNOTATIONS['update-mode'],
'update'))).lower()
annotation[ANNOTATIONS['protected']] = str(
resource.get('protected', annotation.get(ANNOTATIONS['protected'], 'false'))).lower()
return resource
def _create_namespaces(self):
if self.namespace:
ns = self.create_namespace(self.namespace)
self._resources.insert(0, ns)
def resources(self):
""" Override resources to auto-create namespace"""
if self._resources is None:
self._resources = self.manifest.resources
self._create_namespaces()
return self._resources
def _apply_patches(self, resources):
for _, resource in resources.iteritems():
if self.namespace:
if 'namespace' in resource['value']['metadata']:
op = 'replace'
else:
op = 'add'
resource['patch'].append({
"op": op,
"path": "/metadata/namespace",
"value": self.namespace})
if len(resource['patch']):
patch = jsonpatch.JsonPatch(resource['patch'])
result = patch.apply(resource['value'])
resource['value'] = result
return resources
@property
def kubClass(self):
return Kub
def create_namespace(self, namespace):
value = {"apiVersion": "v1", "kind": "Namespace", "metadata": {"name": namespace}}
resource = {
"file": "%s-ns.yaml" % namespace,
"name": namespace,
"generated": True,
"order": -1,
"hash": False,
"protected": True,
"update_mode": 'update',
"value": value,
"patch": [],
"variables": {},
"type": "namespace"}
return resource
def build(self):
result = []
for kub in self.dependencies:
result.append(self._dep_build(kub))
return {"deploy": result, "package": {"name": self.name, "version": self.version}}
def _dep_build(self, kub):
package = {
"package": kub.name,
"version": kub.version,
"namespace": kub.namespace,
"resources": []}
for resource in kub.resources():
package['resources'].\
append(self._resource_build(kub, resource))
return package
def _process_deploy(self, dry=False, force=False, fmt="txt", proxy=None, action="create",
dest="/tmp/appr"):
def output_progress(kubsource, status, fmt="text"):
if fmt == 'text':
print(" --> %s (%s): %s" % (kubsource.name, kubsource.kind, colorize(status)))
dest = os.path.join(dest, self.name, self.version)
mkdir_p(dest)
table = []
results = []
if fmt == "text":
print("%s %s " % (action, self.name))
i = 0
for kub in self.dependencies:
package = self._dep_build(kub)
i += 1
pname = package["package"]
version = package["version"]
namespace = package["namespace"]
if fmt == "text":
print("\n %02d - %s:" % (i, package["package"]))
for resource in package["resources"]:
body = resource["body"]
endpoint = resource["endpoint"]
# Use API instead of kubectl
with open(
os.path.join(dest, "%s-%s" % (resource['name'],
resource['file'].replace("/", "_"))),
'wb') as f:
f.write(body)
kubresource = Kubernetes(namespace=namespace, body=body, endpoint=endpoint,
proxy=proxy)
status = getattr(kubresource, action)(force=force, dry=dry, strategy=resource.get(
'update_mode', 'update'))
if fmt == "text":
output_progress(kubresource, status)
result_line = OrderedDict(
[("package", pname), ("version", version), ("kind", kubresource.kind),
("dry", dry), ("name", kubresource.name),
("namespace", kubresource.namespace), ("status", status)])
if status != 'ok' and action == 'create':
kubresource.wait(os.getenv("APPR_KUB_SECONDS", 0.1))
results.append(result_line)
if fmt == "text":
header = ["package", "version", "kind", "name", "namespace", "status"]
display_line = []
for k in header:
display_line.append(result_line[k])
table.append(display_line)
if fmt == "text":
print(print_deploy_result(table))
return results
def deploy(self, *args, **kwargs):
kwargs['action'] = 'create'
return self._process_deploy(*args, **kwargs)
def delete(self, *args, **kwargs):
kwargs['action'] = 'delete'
return self._process_deploy(*args, **kwargs)
| {
"repo_name": "app-registry/appr",
"path": "appr/formats/appr/kub.py",
"copies": "2",
"size": "7520",
"license": "apache-2.0",
"hash": 53601803445983910,
"line_mean": 36.9797979798,
"line_max": 98,
"alpha_frac": 0.5199468085,
"autogenerated": false,
"ratio": 4.460260972716489,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5980207781216489,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import json
import logging
import os
import os.path
import re
import jinja2
import yaml
import appr.template_filters as filters
from appr.utils import convert_utf8
logger = logging.getLogger(__name__)
with open(os.path.join(os.path.dirname(__file__), "jsonnet/manifest.jsonnet.j2")) as f:
JSONNET_TEMPLATE = f.read()
def yaml_to_jsonnet(manifestyaml, tla_codes=None):
jinja_env = jinja2.Environment()
jinja_env.filters.update(filters.jinja_filters())
# 1. Resolve old manifest variables
# Load 'old' manifest.yaml
tempvars = {"manifest": convert_utf8(json.loads(json.dumps(yaml.load(manifestyaml))))}
    # Get variables from the 'old' manifest and update them
variables = tempvars['manifest'].get("variables", {})
if tla_codes is not None and 'params' in tla_codes:
tla = json.loads(tla_codes['params']).get("variables", {})
variables.update(tla)
# Resolve the templated variables inside the 'old' manifest
manifest_tpl = jinja_env.from_string(manifestyaml)
# 2. Convert 'old' manifest.yaml to manifest.jsonnet
rendered_manifestyaml = manifest_tpl.render(variables)
v = {"manifest": convert_utf8(json.loads(json.dumps(yaml.load(rendered_manifestyaml))))}
# Load the yaml -> jsonnet template
template = jinja_env.from_string(JSONNET_TEMPLATE)
templatedjsonnet = template.render(v)
# @TODO keep yaml format and escape 'jsonnet' commands: key: "<% $.variables.key %>"
# jsonnet_str = re.sub(r'[\'"]<%(.*)%>["\']', r"\1", templatedjsonnet)
return templatedjsonnet
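# Usage sketch (illustrative only; the manifest snippet and override are
# hypothetical): variables from the YAML manifest, optionally overridden via
# tla_codes['params'], are rendered through Jinja2 first, and the result is fed
# into the yaml -> jsonnet template; the returned source can then be evaluated
# with RenderJsonnet().render_jsonnet(...).
#
#   manifest = "package:\n  name: demo\nvariables:\n  replicas: 2\n"
#   overrides = {'params': json.dumps({'variables': {'replicas': 3}})}
#   jsonnet_src = yaml_to_jsonnet(manifest, tla_codes=overrides)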
class RenderJsonnet(object):
def __init__(self, files=None, manifestpath=None, lib_dirs=[]):
self.manifestdir = None
if manifestpath:
self.manifestdir = os.path.dirname(manifestpath)
self.files = files
lib_dirs.append(os.path.join(os.path.dirname(__file__), "jsonnet/lib"))
self.lib_dirs = lib_dirs
# Returns content if worked, None if file not found, or throws an exception
def try_path(self, path, rel):
if rel[0] == '/':
full_path = rel
else:
full_path = path + rel
if full_path[-1] == '/':
raise RuntimeError('Attempted to import a directory')
if not rel:
raise RuntimeError('Got invalid filename (empty string).')
if self.files is not None and full_path in self.files:
if self.files[full_path] is None:
with open(full_path) as f:
self.files[full_path] = f.read()
return rel, self.files[full_path]
# @TODO(ant31) fail if full_path is absolute
elif self.manifestdir and os.path.isfile(os.path.join(self.manifestdir, full_path)):
filepath = os.path.join(self.manifestdir, full_path)
with open(filepath) as f:
return rel, f.read()
else:
for libdir in self.lib_dirs:
libpath = os.path.join(libdir, rel)
if os.path.isfile(libpath):
with open(libpath) as f:
return rel, f.read()
if not os.path.isfile(full_path):
return full_path, None
with open(full_path) as f:
return full_path, f.read()
def import_callback(self, path, rel):
full_path, content = self.try_path(path, rel)
if content:
return full_path, content
raise RuntimeError('File not found')
def render_jsonnet(self, manifeststr, tla_codes=None):
# @TODO(ant31): workaround until jsonnet compile on windows
import _jsonnet
try:
json_str = _jsonnet.evaluate_snippet( # pylint: disable=no-member
"snippet", manifeststr, import_callback=self.import_callback,
native_callbacks=filters.jsonnet_callbacks(), tla_codes=tla_codes)
except RuntimeError as e:
print("tla_codes: %s" % (str(tla_codes)))
print("\n".join([
"%s %s" % (i, line) for i, line in enumerate([
l for l in manifeststr.split("\n") if re.match(r"^ *#", l) is None])]))
raise e
return json.loads(json_str)
| {
"repo_name": "app-registry/appr",
"path": "appr/render_jsonnet.py",
"copies": "2",
"size": "4262",
"license": "apache-2.0",
"hash": -7556183672561099000,
"line_mean": 37.7454545455,
"line_max": 92,
"alpha_frac": 0.61238855,
"autogenerated": false,
"ratio": 3.7222707423580785,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5334659292358079,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import json
import os
import sys
import shutil
import requests
import logging
import tempfile
import zipfile
from subprocess import Popen, PIPE
import warnings
from .exceptions import CondaException
from .utils import shell_out
mini_file = "Miniconda-latest.sh"
miniconda_urls = {"linux": "https://repo.continuum.io/miniconda/"
"Miniconda3-latest-Linux-x86_64.sh",
"darwin": "https://repo.continuum.io/miniconda/"
"Miniconda3-latest-MacOSX-x86_64.sh",
"win": "https://repo.continuum.io/miniconda/"
"Miniconda3-latest-Windows-x86_64.exe"}
logger = logging.getLogger(__name__)
here = os.path.dirname(__file__)
def miniconda_url():
"""What to download for this platform"""
if sys.platform.startswith('linux'):
url = miniconda_urls['linux']
elif sys.platform.startswith('darwin'): # pragma: no cover
url = miniconda_urls['darwin']
else: # pragma: no cover
url = miniconda_urls['win']
if not sys.maxsize > 2 ** 32: # pragma: no cover
# 64bit check
url = url.replace("_64", "")
return url
class CondaCreator(object):
"""
Create Conda Env
The parameters below can generally be guessed from the system.
If `conda info` is required, it will only be run on the first
invocation, and the result cached.
Parameters
----------
conda_root: str
Location of a conda installation. The conda executable is expected
at /bin/conda within.
If None, runs `conda info` to find out relevant information.
If no conda is found for `conda info`, or no conda executable exists
within the given location, will download and install miniconda
at that location. If the value is None, that location is within the
source tree.
conda_envs: str
directory in which to create environments; usually within the
source directory (so as not to pollute normal usage of conda)
miniconda_url: str
location to download miniconda from, if needed. Uses `miniconda_urls`
for the appropriate platform if not given.
channels: list of str
Channels to specify to conda. Note that defaults in .condarc will also
be included
conda_pkgs: str
Directory containing cached conda packages, normally within conda_root.
"""
conda_info = {}
def __init__(self, conda_root=None, conda_envs=None, miniconda_url=None,
channels=None, conda_pkgs=None):
if conda_root is None:
self._get_conda_info()
if self.conda_info:
self.conda_root = self.conda_info['conda_prefix']
else:
self.conda_root = os.path.join(here, 'tmp_conda')
else:
self.conda_root = conda_root
self.conda_bin = os.path.join(self.conda_root, 'bin', 'conda')
if not os.path.exists(self.conda_bin):
self._install_miniconda(self.conda_root, miniconda_url)
self.conda_envs = conda_envs or os.sep.join([here, 'tmp_conda', 'envs'])
self.conda_pkgs = conda_pkgs
self.channels = channels or []
def _install_miniconda(self, root, url):
if os.path.exists(root):
# conda wants to create the dir - plus this errors if location
# is not empty
os.rmdir(root)
url = url or miniconda_url()
tmp = tempfile.mkdtemp()
minifile = os.path.join(tmp, 'Miniconda3')
logger.debug("Downloading latest Miniconda.sh")
r = requests.get(url, stream=True)
with open(minifile, 'wb') as f:
for chunk in r.iter_content(chunk_size=1024):
if chunk:
f.write(chunk)
f.flush()
install_cmd = "bash {0} -b -p {1}".format(minifile, root).split()
logger.debug("Installing Miniconda in {0}".format(root))
proc = Popen(install_cmd, stdout=PIPE, stderr=PIPE)
out, err = proc.communicate()
logger.debug(out)
logger.debug(err)
self.conda_info['conda_prefix'] = root
def _get_conda_info(self):
"""Ask a conda on PATH where it is installed"""
if self.conda_info:
# already did this before
return
try:
self.conda_info.update(json.loads(shell_out(
['conda', 'info', '--json'])))
except (OSError, IOError):
warnings.warn('No conda found on PATH')
def _create_env(self, env_name, packages=None, remove=False):
"""
        Create a conda environment. If env_name is found in self.conda_envs,
        it will be used without checking the existence of any given packages
Parameters
----------
env_name : str
packages : list
        remove : bool
            remove the environment if it already exists and start from scratch
Returns
-------
path : str
path to newly created conda environment
"""
env_path = os.path.join(self.conda_envs, env_name)
if os.path.exists(env_path):
if not remove:
# assume env is OK, ignore packages.
return env_path
shutil.rmtree(env_path)
if not isinstance(packages, list):
raise TypeError("Packages must be a list of strings")
ch = []
[ch.extend(['-c', c]) for c in self.channels]
cmd = [self.conda_bin, 'create', '-p', env_path, '-y',
'-q'] + packages + ch
logger.info("Creating new env {0}".format(env_name))
logger.info(' '.join(cmd))
env = dict(os.environ)
if self.conda_pkgs:
env['CONDA_PKGS_DIRS'] = self.conda_pkgs
proc = Popen(cmd, stdout=PIPE, stderr=PIPE, env=env)
out, err = proc.communicate()
logger.debug(out)
logger.debug(err)
env_python = os.path.join(env_path, 'bin', 'python')
if not os.path.exists(env_python):
raise CondaException("Failed to create Python binary at %s."
"" % env_python)
return env_path
def find_env(self, env_name):
"""
Find full path to env_name
Parameters
----------
env_name : str
Returns
-------
path : str
path to conda environment
"""
env_path = os.path.join(self.conda_envs, env_name)
if os.path.exists(env_path):
return env_path
def create_env(self, env_name, packages=None, remove=False):
"""
Create zipped directory of a conda environment
Parameters
----------
env_name : str
packages : list
remove : bool
remove environment should it exist
Returns
-------
path : str
path to zipped conda environment
"""
if not packages:
env_path = self.find_env(env_name)
else:
env_path = self._create_env(env_name, packages, remove)
return zip_path(env_path)
def zip_path(path, out_file=None):
"""
Zip up directory
Parameters
----------
path : string
    out_file : str
        Output zip-file; if not given, same as input path with
.zip appended
Returns
-------
path : string
path to zipped file
"""
fname = os.path.basename(path) + '.zip'
env_dir = os.path.dirname(path)
zFile = out_file or os.path.join(env_dir, fname)
# ZipFile does not have a contextmanager in Python 2.6
f = zipfile.ZipFile(zFile, 'w', allowZip64=True)
try:
logger.info('Creating: %s' % zFile)
for root, dirs, files in os.walk(path):
for file in files:
relfile = os.path.join(
os.path.relpath(root, env_dir), file)
absfile = os.path.join(root, file)
try:
os.stat(absfile)
except OSError:
logger.info('Skipping zip for %s' % absfile)
continue
f.write(absfile, relfile)
return zFile
finally:
f.close()
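# Usage sketch (illustrative only; the environment name and package list are
# hypothetical): the creator locates or installs a conda root, builds the named
# environment under conda_envs, and create_env() returns the path to the zipped
# environment produced by zip_path().
#
#   cc = CondaCreator(channels=['conda-forge'])
#   env_zip = cc.create_env('py35', packages=['python=3.5', 'numpy'])
#   # env_zip -> '<conda_envs>/py35.zip', ready to ship alongside a YARN job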
| {
"repo_name": "blaze/knit",
"path": "knit/env.py",
"copies": "2",
"size": "8326",
"license": "bsd-3-clause",
"hash": -689858093141764200,
"line_mean": 31.0230769231,
"line_max": 80,
"alpha_frac": 0.5679798222,
"autogenerated": false,
"ratio": 4.004810004810005,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5572789827010005,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import json
import os
from appr.exception import Forbidden, PackageAlreadyExists
from appr.models.blob_base import BlobBase
from appr.models.channel_base import ChannelBase
from appr.models.package_base import PackageBase
class ApprDB(object):
Channel = ChannelBase
Package = PackageBase
Blob = BlobBase
@classmethod
def restore_backup(cls, data):
""" bulk add data in db """
i = 0
size = len(data['packages'])
for package_data in data['packages']:
i += 1
package = cls.Package(package_data['package'], package_data['release'])
package.data = package_data
package.blob = cls.Blob(package.package, package_data['blob'])
try:
package.save(False)
# print '%s/%s restored: %s(%s) - %s' % (str(i), str(size),
# package.package, package.release, package.media_type)
except PackageAlreadyExists:
pass
# print '%s/%s existed: %s(%s) - %s' % (str(i), str(size),
# package.package, package.release, package.media_type)
for channel_name in package_data['channels']:
channel = cls.Channel(channel_name, package.package)
channel.add_release(package.release, cls.Package)
# print "%s/%s restored-channel-release: %s, %s, %s" % (str(i), str(size),
# channel.package, channel.name, package.release)
i = 0
size = len(data['channels'])
for channel_data in data['channels']:
i += 1
channel = cls.Channel(channel_data['name'], channel_data['package'])
channel.add_release(channel_data['current'], cls.Package)
print("%s/%s restored-channel: %s" % (str(i), str(size), channel.name))
@classmethod
def restore_backup_from_file(cls, filepath):
""" bulk add data in db """
with open(filepath, 'rb') as f:
data = json.load(f)
return cls.restore_backup(data)
@classmethod
def reset_db(cls, force=False):
""" clean the database """
if os.getenv("APPR_DB_ALLOW_RESET", "false") == "true" or force:
raise NotImplementedError
else:
raise Forbidden("Reset DB is deactivated")
@classmethod
def backup(cls):
data = {'packages': cls.Package.dump_all(cls.Blob), 'channels': cls.Channel.dump_all()}
return data
@classmethod
def backup_to_file(cls, filepath):
with open(filepath, 'wb') as f:
json.dump(cls.backup(), f)
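# Usage sketch (illustrative only; the path is hypothetical, and a concrete
# subclass wiring Package/Channel/Blob to a real backend would normally be
# used): backup() and restore_backup() round-trip the registry through a single
# JSON document holding the 'packages' and 'channels' dumps.
#
#   ApprDB.backup_to_file('/tmp/registry-backup.json')
#   ApprDB.restore_backup_from_file('/tmp/registry-backup.json')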
| {
"repo_name": "cn-app-registry/cnr-server",
"path": "appr/models/db_base.py",
"copies": "2",
"size": "2810",
"license": "apache-2.0",
"hash": -5053799959045351000,
"line_mean": 36.4666666667,
"line_max": 119,
"alpha_frac": 0.5544483986,
"autogenerated": false,
"ratio": 4.108187134502924,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0011210357772001571,
"num_lines": 75
} |
from __future__ import absolute_import, division, print_function
import json
import os
from ..qt.widgets import ScatterWidget, HistogramWidget
from ..core import Subset
def save_page(page, page_number, label, subset):
""" Convert a tab of a glue session into a D3PO page
:param page: Tuple of data viewers to save
:param label: Tab label
"""
result = {}
# layout settings
result['grid'] = {'nRows': 1, 'nColumns': len(page)}
result['name'] = str(label)
result['caption'] = 'Generated by Glue'
# style settings
d = page[0]._data[0]
unselected = dict(opacity=d.style.alpha,
size=d.style.markersize / 2,
color=d.style.color)
result['markerStyle'] = dict(unselected=unselected)
if subset is not None:
s = subset.style
selected = dict(opacity=s.alpha, size=s.markersize / 2, color=s.color)
result['markerStyle']['selected'] = selected
result['selection'] = {'type': 'booleanColumn',
'columnName': 'selection_%i' % page_number}
result['histogramStyle'] = result['markerStyle']
# save each plot
result['plots'] = list(map(save_plot, page, range(len(page))))
return result
def save_plot_base(plot, index):
result = {}
result['gridPosition'] = [0, index]
return result
def save_plot(plot, index):
dispatch = {ScatterWidget: save_scatter,
HistogramWidget: save_histogram}
typ = type(plot)
return dispatch[typ](plot, index)
def save_scatter(plot, index):
""" Convert a single glue scatter plot to a D3PO plot
:param plot: Glue scatter plot
:class:`~glue.qt.widgets.scatter_widget.ScatterWidget`
:param index: 1D index of plot on the page
:type index: int
:rtype: json-serializable dict
"""
result = save_plot_base(plot, index)
props = plot.properties
result['type'] = 'scatter'
result['xAxis'] = dict(columnName=props['xatt'].label,
range=[props['xmin'], props['xmax']])
result['yAxis'] = dict(columnName=props['yatt'].label,
range=[props['ymin'], props['ymax']])
# XXX log scales
return result
def save_histogram(plot, index):
""" Convert a single histogram to a D3PO plot
:param plot: Glue histogram
:type plot: :class:`~glue.qt.widgets.histogram_widget.HistogramWidget`
:param index: 1D index of plot on the page
:type index: int
:rtype: json-serializable dict
"""
result = save_plot_base(plot, index)
props = plot.properties
result['type'] = 'histogram'
result['xAxis'] = dict(columnName=props['component'].label,
bins=props['nbins'],
range=[props['xmin'], props['xmax']])
    # XXX normed, cumulative, log
return result
def stage_subsets(application):
"""
Return a tuple of the subset to use for each stage/tab,
or None if the tab has no subset
If more than one subset is used per stage/tab, returns None
"""
result = []
for page in application.viewers:
subset = None
for viewer in page:
for layer_artist in viewer.layers:
if not layer_artist.visible:
continue
s = layer_artist.layer
if not isinstance(s, Subset):
continue
if subset is not None and s is not subset:
return None
if subset is None:
subset = s
result.append(subset)
return tuple(result)
def can_save_d3po(application):
"""
Check whether an application can be exported to D3PO.
Raises an exception if not
"""
dc = application.session.data_collection
if len(dc) != 1:
raise ValueError("D3PO Export only supports a single dataset")
data = dc[0]
for tab in application.viewers:
for viewer in tab:
if not isinstance(viewer, (ScatterWidget, HistogramWidget)):
raise ValueError("D3PO Export only supports scatter "
"and histogram plots")
if sum(len(tab) for tab in application.viewers) == 0:
raise ValueError("D3PO Export requires at least one scatterplot "
"or histogram")
if stage_subsets(application) is None:
raise ValueError("D3PO Export restricted to 0 or 1 subsets visible "
"in each tab")
def make_data_file(data, subsets, path):
"""
Create the data.csv file, given Data and tuple of subsets
"""
from astropy.table import Table, Column
data_path = os.path.join(path, 'data.csv')
t = Table([data[c] for c in data.components],
names=[c.label for c in data.components])
for i, subset in enumerate(subsets):
if subset is None:
continue
c = Column(data=subset.to_mask().astype('i'), name='selection_%i' % i)
t.add_column(c)
t.write(data_path, format='ascii', delimiter=',')
def save_d3po(application, path):
"""Save a Glue session to a D3PO bundle.
Currently, this has the following restrictions:
- The Glue session must have only one dataset open, and 0 or 1 subsets
- Only scatter plots or histograms are present
- At least one plot is present
:param application: Glue appication to save
:param path: Path to directory to save in. Will be created if needed
"""
if os.path.exists(path) and not os.path.isdir(path):
os.unlink(path)
if not os.path.exists(path):
os.mkdir(path)
data = application.session.data_collection[0]
subsets = stage_subsets(application)
viewers = application.viewers
# data.csv
make_data_file(data, subsets, path)
# states.json
result = {}
result['filename'] = 'data.csv' # XXX don't think this is needed?
result['title'] = "Glue export of %s" % data.label
result['states'] = list(map(save_page, application.viewers,
range(len(viewers)),
application.tab_names,
subsets))
state_path = os.path.join(path, 'states.json')
with open(state_path, 'w') as outfile:
json.dump(result, outfile, indent=2)
# index.html
html_path = os.path.join(path, 'index.html')
with open(html_path, 'w') as outfile:
outfile.write(HTML)
# show the result
launch(path)
def launch(path):
"""Start a server to view an exported D3PO bundle, and open a browser.
:param path: The TLD of the bundle
"""
from SocketServer import TCPServer
from SimpleHTTPServer import SimpleHTTPRequestHandler
from random import randrange
from socket import error
import webbrowser
from threading import Thread
os.chdir(path)
while True:
try:
PORT = randrange(8000, 9000)
server = TCPServer(("", PORT), SimpleHTTPRequestHandler, False)
server.allow_reuse_address = True
server.server_bind()
break
except error: # port already taken
pass
print('Serving D3PO on port 0.0.0.0:%i' % PORT)
server.server_activate()
thread = Thread(target=server.serve_forever)
thread.setDaemon(True) # do not prevent shutdown
thread.start()
webbrowser.open('http://0.0.0.0:%i' % PORT)
def setup():
from ..config import exporters
exporters.add('D3PO', save_d3po, can_save_d3po, outmode='directory')
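# Usage sketch (illustrative only; `app` and the output directory are
# hypothetical): with a single dataset, at most one visible subset per tab and
# only scatter/histogram viewers, the exporter writes data.csv, states.json and
# index.html, then serves the bundle locally via launch().
#
#   can_save_d3po(app)                 # raises ValueError if not exportable
#   save_d3po(app, '/tmp/d3po-export')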
HTML = """
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8" />
<link rel="stylesheet" type="text/css" href="http://d3po.org/static/css/style.css">
<link rel="stylesheet" type="text/css" href="http://d3po.org/static/css/d3po.css">
<link href='http://fonts.googleapis.com/css?family=Source+Sans+Pro:100,200,300,400,700' rel='stylesheet' type='text/css'>
<style>
#footer {
position: fixed;
bottom: 0;
right: 0;
}
</style>
<!-- not to be confused with Planet Telex -->
<!-- Javscript dependencies -->
<script src="http://d3js.org/d3.v3.min.js" charset="utf-8"></script>
<script src="http://d3po.org/static/js/util.js"></script>
<script src="//ajax.googleapis.com/ajax/libs/jquery/1.10.2/jquery.min.js"></script>
<script src="http://d3po.org/static/js/d3po.js"></script>
<script src="http://d3po.org/static/js/d3po.init.js"></script>
</head>
<body>
<div id="svg"><svg></svg></div>
<div id="controls">
<ul class="navigation">
</ul>
</div>
<div id="caption"></div>
<div id="footer">
More information: <a href="http://d3po.org">d3po.org</a>
</div>
<script type="text/javascript">
$(document).ready(function() {
initialize('states.json', 'data.csv');
}
);
</script>
</body>
</html>
"""
| {
"repo_name": "JudoWill/glue",
"path": "glue/plugins/export_d3po.py",
"copies": "1",
"size": "8773",
"license": "bsd-3-clause",
"hash": 1987512367408500500,
"line_mean": 28.0496688742,
"line_max": 121,
"alpha_frac": 0.6130172119,
"autogenerated": false,
"ratio": 3.6938947368421053,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4806911948742105,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import json
import os.path as op
import six
import numpy as np
import scipy.signal as sig
import scipy.io.wavfile as sciwav
MAXINT16 = 2**15 - 1
FS = 44100
COEFF_DIR = op.join(op.dirname(op.abspath(__file__)), 'coeffs')
def normalize(data, maxamp=1):
data *= maxamp / max(abs(data))
def load_coeffs(fname):
with open(op.join(COEFF_DIR, fname)) as f:
return json.load(f)
POTS_COEFFS = load_coeffs('pots.json')
def pots(data, snr=30, seed=None):
if seed is not None:
np.random.seed(seed)
# ensure mono
if data.ndim != 1:
data = data[:,0]
# convert to float, but simulate 16-bit quantization if needed
orig_dtype = data.dtype
data = data.astype('float')
if orig_dtype != 'int16':
normalize(data, maxamp=MAXINT16)
np.around(data, out=data)
normalize(data)
# pad start and end
#leader_len = np.random.randint(0.1 * FS, 0.4 * FS)
#trailer_len = 0.5 * FS - leader_len
#data = np.concatenate([np.zeros(leader_len), data, np.zeros(trailer_len)])
# do filtering
for b, a in POTS_COEFFS['signal']:
data = sig.lfilter(b, a, data)
# add band-limited noise (filtered white noise)
#np.random.seed(0)
noise = 10**(-snr/20) * ((np.random.random(size=data.shape) * 2) - 1)
for b, a in POTS_COEFFS['noiseband']:
noise = sig.lfilter(b, a, noise)
data += noise
# renormalize and convert to 16-bit integers
normalize(data, maxamp=MAXINT16)
data = data.astype('int16')
return data
class DigitalStreamFilter(object):
mimes = {
'wav': 'audio/vnd.wave',
'txt': 'text/plain',
'json': 'application/json',
}
output_suffix = 'filtered'
def __init__(self, data=None, stream=None, filename=None, dtype=None):
if dtype is None and filename is None:
try:
# werkzeug.FileStorage has 'filename', python files have 'name'
filename = getattr(stream, 'filename', getattr(stream, 'name'))
except AttributeError:
raise ValueError("Can't determine type from stream. "
"Provide dtype or filename to infer type.")
if dtype is None:
dtype = filename.split('.')[-1]
self.dtype = dtype
self.filename = filename
self.json_extra = {}
if data is not None:
self.data = np.array(data)
elif stream is not None:
self.load(stream)
else:
with open(filename, 'rb') as stream:
self.load(stream)
def load(self, stream):
dispatcher = {
'wav': self._load_wave,
'txt': self._load_text,
'json': self._load_json,
}
try:
data = dispatcher[self.dtype](stream)
except KeyError:
raise TypeError('Unsupported input type: {} (accepts {})'.format(
self.dtype, ', '.join(dispatcher.keys())))
self.data = np.array(data)
def process(self, *args, **kwargs):
raise NotImplementedError('abstract method')
def dump(self, stream, dtype=None):
if dtype is None:
dtype = self.dtype
{'wav': self._dump_wave,
'txt': self._dump_text,
'json': self._dump_json,
}[dtype](stream)
def suggested_name(self):
parts = self.filename.split('.')[:-1]
parts.extend([self.output_suffix, self.dtype])
return '.'.join(parts)
def mimetype(self):
return self.mimes[self.dtype]
def _load_wave(self, stream):
rate, data = sciwav.read(stream)
return data
def _load_text(self, stream):
return np.loadtxt(stream, dtype='int16')
def _load_json(self, stream):
return np.array(json.load(stream))
def _dump_wave(self, stream):
sciwav.write(stream, FS, self.data)
def _dump_text(self, stream):
np.savetxt(stream, self.data, fmt='%d')
def _dump_json(self, stream):
json.dump({'data': self.data.tolist(), 'rate': FS}, stream)
class POTSFilter(DigitalStreamFilter):
output_suffix = 'pots-filtered'
def process(self, *args, **kwargs):
self.data = pots(self.data, *args, **kwargs)
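# Usage sketch (illustrative only; the wav path is hypothetical): POTSFilter
# loads a 16-bit clip, pots() band-limits it and adds noise at the requested
# SNR, and dump() writes the result under the name suggested_name() proposes.
#
#   f = POTSFilter(filename='voice.wav')
#   f.process(snr=25, seed=0)
#   with open(f.suggested_name(), 'wb') as out:   # -> 'voice.pots-filtered.wav'
#       f.dump(out)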
| {
"repo_name": "nicktimko/pots-sim",
"path": "potsim/filters.py",
"copies": "1",
"size": "4324",
"license": "mit",
"hash": -9061933648046750000,
"line_mean": 27.2614379085,
"line_max": 79,
"alpha_frac": 0.5811748381,
"autogenerated": false,
"ratio": 3.5794701986754967,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4660645036775497,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import json
import random
import numpy as np
from stt_utils import calc_feat_dim, spectrogram_from_file
from config_util import generate_file_path
from log_util import LogUtil
from label_util import LabelUtil
from stt_bi_graphemes_util import generate_bi_graphemes_label
class DataGenerator(object):
def __init__(self, save_dir, model_name, step=10, window=20, max_freq=8000, desc_file=None):
"""
Params:
step (int): Step size in milliseconds between windows
window (int): FFT window size in milliseconds
max_freq (int): Only FFT bins corresponding to frequencies between
[0, max_freq] are returned
            desc_file (str, optional): Path to a JSON-line file that contains
                labels and paths to the audio files. If given, its metadata is
                loaded right away
"""
#calc_feat_dim returns int(0.001*window*max_freq)+1
super(DataGenerator, self).__init__()
# feat_dim=0.001*20*8000+1=161
self.feat_dim = calc_feat_dim(window, max_freq)
# 1d 161 length of array filled with zeros
self.feats_mean = np.zeros((self.feat_dim,))
# 1d 161 length of array filled with 1s
self.feats_std = np.ones((self.feat_dim,))
self.max_input_length = 0
        self.max_length_list_in_batch = []
        # pseudo-random number generator (random() draws uniformly from [0.0, 1.0))
self.rng = random.Random()
if desc_file is not None:
self.load_metadata_from_desc_file(desc_file)
self.step = step
self.window = window
self.max_freq = max_freq
self.save_dir = save_dir
self.model_name = model_name
def get_meta_from_file(self, feats_mean, feats_std):
self.feats_mean = feats_mean
self.feats_std = feats_std
def featurize(self, audio_clip, overwrite=False):
""" For a given audio clip, calculate the log of its Fourier Transform
Params:
audio_clip(str): Path to the audio clip
"""
return spectrogram_from_file(
audio_clip, step=self.step, window=self.window,
max_freq=self.max_freq, overwrite=overwrite)
def load_metadata_from_desc_file(self, desc_file, partition='train',
max_duration=16.0,):
""" Read metadata from the description file
(possibly takes long, depending on the filesize)
Params:
desc_file (str): Path to a JSON-line file that contains labels and
paths to the audio files
partition (str): One of 'train', 'validation' or 'test'
max_duration (float): In seconds, the maximum duration of
utterances to train or test on
"""
logger = LogUtil().getlogger()
logger.info('Reading description file: {} for partition: {}'
.format(desc_file, partition))
audio_paths, durations, texts = [], [], []
with open(desc_file) as json_line_file:
for line_num, json_line in enumerate(json_line_file):
try:
spec = json.loads(json_line)
if float(spec['duration']) > max_duration:
continue
audio_paths.append(spec['key'])
durations.append(float(spec['duration']))
texts.append(spec['text'])
except Exception as e:
# Change to (KeyError, ValueError) or
# (KeyError,json.decoder.JSONDecodeError), depending on
# json module version
logger.warn('Error reading line #{}: {}'
.format(line_num, json_line))
logger.warn(str(e))
if partition == 'train':
self.count = len(audio_paths)
self.train_audio_paths = audio_paths
self.train_durations = durations
self.train_texts = texts
elif partition == 'validation':
self.val_audio_paths = audio_paths
self.val_durations = durations
self.val_texts = texts
self.val_count = len(audio_paths)
elif partition == 'test':
self.test_audio_paths = audio_paths
self.test_durations = durations
self.test_texts = texts
else:
raise Exception("Invalid partition to load metadata. "
"Must be train/validation/test")
def load_train_data(self, desc_file):
self.load_metadata_from_desc_file(desc_file, 'train')
def load_validation_data(self, desc_file):
self.load_metadata_from_desc_file(desc_file, 'validation')
@staticmethod
def sort_by_duration(durations, audio_paths, texts):
return zip(*sorted(zip(durations, audio_paths, texts)))
def normalize(self, feature, eps=1e-14):
return (feature - self.feats_mean) / (self.feats_std + eps)
def get_max_label_length(self, partition, is_bi_graphemes=False):
if partition == 'train':
texts = self.train_texts + self.val_texts
elif partition == 'test':
texts = self.train_texts
else:
raise Exception("Invalid partition to load metadata. "
"Must be train/validation/test")
if is_bi_graphemes:
self.max_label_length = max([len(generate_bi_graphemes_label(text)) for text in texts])
else:
self.max_label_length = max([len(text) for text in texts])
return self.max_label_length
def get_max_seq_length(self, partition):
if partition == 'train':
audio_paths = self.train_audio_paths + self.val_audio_paths
durations = self.train_durations + self.val_durations
elif partition == 'test':
audio_paths = self.train_audio_paths
durations = self.train_durations
else:
raise Exception("Invalid partition to load metadata. "
"Must be train/validation/test")
max_duration_indexes = durations.index(max(durations))
max_seq_length = self.featurize(audio_paths[max_duration_indexes]).shape[0]
        self.max_seq_length = max_seq_length
return max_seq_length
def prepare_minibatch(self, audio_paths, texts, overwrite=False, is_bi_graphemes=False):
""" Featurize a minibatch of audio, zero pad them and return a dictionary
Params:
audio_paths (list(str)): List of paths to audio files
texts (list(str)): List of texts corresponding to the audio files
Returns:
dict: See below for contents
"""
assert len(audio_paths) == len(texts),\
"Inputs and outputs to the network must be of the same number"
# Features is a list of (timesteps, feature_dim) arrays
# Calculate the features for each audio clip, as the log of the
# Fourier Transform of the audio
features = [self.featurize(a, overwrite=overwrite) for a in audio_paths]
input_lengths = [f.shape[0] for f in features]
feature_dim = features[0].shape[1]
mb_size = len(features)
# Pad all the inputs so that they are all the same length
x = np.zeros((mb_size, self.max_seq_length, feature_dim))
y = np.zeros((mb_size, self.max_label_length))
labelUtil = LabelUtil.getInstance()
label_lengths = []
for i in range(mb_size):
feat = features[i]
feat = self.normalize(feat) # Center using means and std
x[i, :feat.shape[0], :] = feat
if is_bi_graphemes:
label = generate_bi_graphemes_label(texts[i])
label = labelUtil.convert_bi_graphemes_to_num(label)
y[i, :len(label)] = label
else:
label = labelUtil.convert_word_to_num(texts[i])
y[i, :len(texts[i])] = label
label_lengths.append(len(label))
return {
            'x': x,  # 0-padded features of shape (mb_size, timesteps, feat_dim)
'y': y, # list(int) Flattened labels (integer sequences)
'texts': texts, # list(str) Original texts
'input_lengths': input_lengths, # list(int) Length of each input
'label_lengths': label_lengths, # list(int) Length of each label
}
def iterate_test(self, minibatch_size=16):
return self.iterate(self.test_audio_paths, self.test_texts,
minibatch_size)
def iterate_validation(self, minibatch_size=16):
return self.iterate(self.val_audio_paths, self.val_texts,
minibatch_size)
def sample_normalize(self, k_samples=1000, overwrite=False):
""" Estimate the mean and std of the features from the training set
Params:
k_samples (int): Use this number of samples for estimation
"""
        # if k_samples is negative, go through the whole training set
        if k_samples < 0:
            samples = self.train_audio_paths
        # otherwise estimate from a random subset of k_samples clips
        else:
            k_samples = min(k_samples, len(self.train_audio_paths))
            samples = self.rng.sample(self.train_audio_paths, k_samples)
        audio_paths_iter = iter(samples)
        audio_clip = next(audio_paths_iter)
feat = self.featurize(audio_clip=audio_clip, overwrite=overwrite)
feat_squared = np.square(feat)
count = float(feat.shape[0])
dim = feat.shape[1]
for iter_index in range(len(samples) - 1):
            next_feat = self.featurize(audio_clip=next(audio_paths_iter), overwrite=overwrite)
next_feat_squared = np.square(next_feat)
feat_vertically_stacked = np.concatenate((feat, next_feat)).reshape(-1, dim)
feat = np.sum(feat_vertically_stacked, axis=0, keepdims=True)
feat_squared_vertically_stacked = np.concatenate((feat_squared, next_feat_squared)).reshape(-1, dim)
feat_squared = np.sum(feat_squared_vertically_stacked, axis=0, keepdims=True)
count = count + float(next_feat.shape[0])
self.feats_mean = feat / float(count)
self.feats_std = np.sqrt(feat_squared / float(count) - np.square(self.feats_mean))
np.savetxt(generate_file_path(self.save_dir, self.model_name, 'feats_mean'), self.feats_mean)
np.savetxt(generate_file_path(self.save_dir, self.model_name, 'feats_std'), self.feats_std)
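# --- Usage sketch (not part of the original module) ---
# A hedged example of the call sequence the docstrings above describe: build a
# generator, load training metadata, estimate feature statistics, then prepare
# one minibatch. The description-file paths, save directory and model name are
# hypothetical placeholders, and LabelUtil is assumed to have been initialised
# elsewhere before prepare_minibatch() is called.
if __name__ == '__main__':
    gen = DataGenerator(save_dir='./checkpoints', model_name='demo_model')
    gen.load_train_data('./train_corpus.json')       # assumed JSON-line file
    gen.load_validation_data('./val_corpus.json')    # assumed JSON-line file
    gen.sample_normalize(k_samples=100)              # estimates feats_mean/feats_std
    gen.get_max_seq_length('train')                  # needed before zero-padding
    gen.get_max_label_length('train')
    batch = gen.prepare_minibatch(gen.train_audio_paths[:4], gen.train_texts[:4])
    print(batch['x'].shape, batch['y'].shape, batch['input_lengths'])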
| {
"repo_name": "arikpoz/mxnet",
"path": "example/speech_recognition/stt_datagenerator.py",
"copies": "11",
"size": "10656",
"license": "apache-2.0",
"hash": -3992392670688260000,
"line_mean": 44.9310344828,
"line_max": 112,
"alpha_frac": 0.5914039039,
"autogenerated": false,
"ratio": 3.919087899963222,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.001241933255840405,
"num_lines": 232
} |
from __future__ import absolute_import, division, print_function
import json
import tarfile
from os.path import basename
def dist_fn(fn):
if fn.endswith('.tar'):
return fn[:-4]
elif fn.endswith('.tar.bz2'):
return fn[:-8]
else:
raise Exception('did not expect filename: %r' % fn)
class TarCheck(object):
def __init__(self, path):
self.t = tarfile.open(path)
self.paths = set(m.path for m in self.t.getmembers())
self.dist = dist_fn(basename(path))
self.name, self.version, self.build = self.dist.rsplit('-', 2)
def info_files(self):
if 'py_' in self.build:
return
lista = [p.strip().decode('utf-8') for p in
self.t.extractfile('info/files').readlines()]
seta = set(lista)
if len(lista) != len(seta):
raise Exception('info/files: duplicates')
listb = [m.path for m in self.t.getmembers()
if not (m.path.startswith('info/') or m.isdir())]
setb = set(listb)
if len(listb) != len(setb):
raise Exception('info_files: duplicate members')
if seta == setb:
return
for p in sorted(seta | setb):
if p not in seta:
print('%r not in info/files' % p)
if p not in setb:
print('%r not in tarball' % p)
raise Exception('info/files')
def index_json(self):
info = json.loads(self.t.extractfile('info/index.json').read().decode('utf-8'))
for varname in 'name', 'version', 'build':
if info[varname] != getattr(self, varname):
raise Exception('%s: %r != %r' % (varname, info[varname],
getattr(self, varname)))
assert isinstance(info['build_number'], int)
def check_all(path):
x = TarCheck(path)
x.info_files()
x.index_json()
x.t.close()
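# --- Usage sketch (not part of the original module) ---
# A hedged example of running the checks above on built conda packages.
# The fallback tarball path is a hypothetical placeholder.
if __name__ == '__main__':
    import sys
    for path in sys.argv[1:] or ['./examplepkg-1.0-0.tar.bz2']:
        print('checking dist:', dist_fn(basename(path)))
        check_all(path)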
| {
"repo_name": "rmcgibbo/conda-build",
"path": "conda_build/tarcheck.py",
"copies": "8",
"size": "1943",
"license": "bsd-3-clause",
"hash": 429311510727633800,
"line_mean": 30.8524590164,
"line_max": 87,
"alpha_frac": 0.5409161091,
"autogenerated": false,
"ratio": 3.679924242424242,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00018628912071535022,
"num_lines": 61
} |
from __future__ import absolute_import, division, print_function
import json
import time
from workflows.services.common_service import CommonService
class SampleConsumer(CommonService):
'''An example service building on top of the workflow.services architecture,
demonstrating how this architecture can be used.
This service consumes messages off a queue.'''
# Human readable service name
_service_name = "Message Consumer"
# Logger name
_logger_name = "workflows.service.sample_consumer"
def initializing(self):
'''Subscribe to a channel.'''
self._transport.subscribe('transient.destination', self.consume_message)
def consume_message(self, header, message):
'''Consume a message'''
logmessage = { 'time': (time.time() % 1000) * 1000,
'header': '',
'message': message }
if header:
logmessage['header'] = json.dumps(header, indent=2) + '\n' + \
'----------------' + '\n'
if isinstance(message, dict):
logmessage['message'] = json.dumps(message, indent=2) + '\n' + \
'----------------' + '\n'
print("=== Consume ====\n{header}{message}".format(**logmessage))
self.log.info("Received message @{time}".format(**logmessage))
self.log.debug("Received message @{time}\n{header}{message}".format(**logmessage))
time.sleep(0.1)
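# --- Illustration (not part of the original module) ---
# A minimal sketch of the log formatting that consume_message() performs,
# using a made-up header and message instead of a real transport delivery.
if __name__ == '__main__':
    sample_header = {'destination': 'transient.destination', 'message-id': '42'}
    sample_message = {'payload': 'hello'}
    logmessage = {'time': (time.time() % 1000) * 1000,
                  'header': json.dumps(sample_header, indent=2) +
                            '\n----------------\n',
                  'message': json.dumps(sample_message, indent=2) +
                             '\n----------------\n'}
    print("=== Consume ====\n{header}{message}".format(**logmessage))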
| {
"repo_name": "xia2/workflows",
"path": "workflows/services/sample_consumer.py",
"copies": "1",
"size": "1401",
"license": "bsd-3-clause",
"hash": 9056505030460099000,
"line_mean": 35.8684210526,
"line_max": 86,
"alpha_frac": 0.6174161313,
"autogenerated": false,
"ratio": 4.0964912280701755,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.008485777150603323,
"num_lines": 38
} |
from __future__ import absolute_import, division, print_function
import json
import unittest
from io import BytesIO
from ..sample import sample_geojson
class TestSample (unittest.TestCase):
def test_sample(self):
geojson_input = b'''{ "type": "FeatureCollection", "features": [
{ "type": "Feature", "geometry": {"type": "Point", "coordinates": [102.0, 0.5]}, "properties": {"prop0": "value0"} },
{ "type": "Feature", "geometry": { "type": "LineString", "coordinates": [ [102.0, 0.0], [103.0, 1.0], [104.0, 0.0], [105.0, 1.0] ] }, "properties": { "prop0": "value0", "prop1": 0.0 } },
{ "type": "Feature", "geometry": { "type": "Polygon", "coordinates": [ [ [100.0, 0.0], [101.0, 0.0], [101.0, 1.0], [100.0, 1.0], [100.0, 0.0] ] ] }, "properties": { "prop0": "value0", "prop1": {"this": "that"}, "prop2": true, "prop3": null } }
] }'''
geojson0 = json.loads(sample_geojson(BytesIO(geojson_input), max_features=0))
self.assertEqual(len(geojson0['features']), 0)
geojson1 = json.loads(sample_geojson(BytesIO(geojson_input), max_features=1))
self.assertEqual(len(geojson1['features']), 1)
geojson2 = json.loads(sample_geojson(BytesIO(geojson_input), max_features=2))
self.assertEqual(len(geojson2['features']), 2)
geojson3 = json.loads(sample_geojson(BytesIO(geojson_input), max_features=3))
self.assertEqual(len(geojson3['features']), 3)
geojson4 = json.loads(sample_geojson(BytesIO(geojson_input), max_features=4))
self.assertEqual(len(geojson4['features']), 3)
self.assertEqual(geojson0['type'], 'FeatureCollection')
self.assertEqual(geojson1['features'][0]['type'], 'Feature')
self.assertEqual(geojson1['features'][0]['properties']['prop0'], 'value0')
self.assertEqual(geojson1['features'][0]['geometry']['type'], 'Point')
self.assertEqual(len(geojson1['features'][0]['geometry']['coordinates']), 2)
self.assertEqual(geojson1['features'][0]['geometry']['coordinates'][0], 102.)
self.assertEqual(geojson1['features'][0]['geometry']['coordinates'][1], .5)
self.assertEqual(geojson2['features'][1]['geometry']['type'], 'LineString')
self.assertEqual(len(geojson2['features'][1]['geometry']['coordinates']), 4)
self.assertEqual(geojson2['features'][1]['geometry']['coordinates'][0][0], 102.)
self.assertEqual(geojson2['features'][1]['geometry']['coordinates'][0][1], 0.)
self.assertEqual(geojson3['features'][2]['geometry']['type'], 'Polygon')
self.assertEqual(len(geojson3['features'][2]['geometry']['coordinates']), 1)
self.assertEqual(geojson3['features'][2]['geometry']['coordinates'][0][0][0], 100.)
self.assertEqual(geojson3['features'][2]['geometry']['coordinates'][0][0][1], 0.)
| {
"repo_name": "slibby/machine",
"path": "openaddr/tests/sample.py",
"copies": "1",
"size": "2980",
"license": "isc",
"hash": -8177020759239320000,
"line_mean": 57.431372549,
"line_max": 271,
"alpha_frac": 0.5929530201,
"autogenerated": false,
"ratio": 3.7064676616915424,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9680082803844718,
"avg_score": 0.02386757558936493,
"num_lines": 51
} |
from __future__ import absolute_import, division, print_function
import json
from blaze.catalog.blaze_url import split_array_base
import dynd
from dynd import nd
from dynd.nd import as_numpy
from blaze import array
class compute_session:
    def __init__(self, array_provider, base_url, array_name):
        session_name, root_dir = array_provider.create_session_dir()
        self.array_provider = array_provider
        self.session_name = session_name
        self.root_dir = root_dir
        self.array_name = array_name
        self.base_url = base_url
def get_session_array(self, array_name = None):
if array_name is None:
array_name = self.array_name
array_root, indexers = split_array_base(array_name)
arr = self.array_provider(array_root)
if arr is None:
raise Exception('No Blaze Array named ' + array_root)
for i in indexers:
if type(i) in [slice, int, tuple]:
arr = arr[i]
else:
arr = getattr(arr, i)
return arr
def creation_response(self):
content_type = 'application/json; charset=utf-8'
body = json.dumps({
'session' : self.base_url + self.session_name,
'version' : 'prototype',
'dynd_python_version': dynd.__version__,
'dynd_version' : dynd.__libdynd_version__,
'access' : 'no permission model yet'
})
return (content_type, body)
def close(self):
print('Deleting files for session %s' % self.session_name)
self.array_provider.delete_session_dir(self.session_name)
content_type = 'application/json; charset=utf-8'
body = json.dumps({
'session': self.base_url + self.session_name,
'action': 'closed'
})
return (content_type, body)
def sort(self, json_cmd):
import numpy as np
        print('sorting')
cmd = json.loads(json_cmd)
array_url = cmd.get('input', self.base_url + self.array_name)
if not array_url.startswith(self.base_url):
raise RuntimeError('Input array must start with the base url')
array_name = array_url[len(self.base_url):]
field = cmd['field']
arr = self.get_session_array(array_name)
nparr = as_numpy(arr)
idxs = np.argsort(nparr[field])
res = nd.ndobject(nparr[idxs])
defarr = self.array_provider.create_deferred_array_filename(
self.session_name, 'sort_', res)
dshape = nd.dshape_of(res)
defarr[0].write(json.dumps({
'dshape': dshape,
'command': 'sort',
'params': {
'field': field,
}
}))
defarr[0].close()
content_type = 'application/json; charset=utf-8'
body = json.dumps({
'session': self.base_url + self.session_name,
'output': self.base_url + defarr[1],
'dshape': dshape
})
return (content_type, body)
def groupby(self, json_cmd):
print('GroupBy operation')
cmd = json.loads(json_cmd)
array_url = cmd.get('input', self.base_url + self.array_name)
if not array_url.startswith(self.base_url):
raise RuntimeError('Input array must start with the base url')
array_name = array_url[len(self.base_url):]
fields = cmd['fields']
arr = self.get_session_array(array_name)[...]._data.dynd_arr()
# Do the groupby, get its groups, then
# evaluate it because deferred operations
# through the groupby won't work well yet.
res = nd.groupby(arr, nd.fields(arr, *fields))
groups = res.groups
res = res.eval()
# Write out the groupby result
defarr_gb = self.array_provider.create_deferred_array_filename(
self.session_name, 'groupby_', array(res))
dshape_gb = nd.dshape_of(res)
defarr_gb[0].write(json.dumps({
'dshape': dshape_gb,
'command': 'groupby',
'params': {
'fields': fields
}
}))
defarr_gb[0].close()
# Write out the groups
defarr_groups = self.array_provider.create_deferred_array_filename(
self.session_name, 'groups_', groups)
dshape_groups = nd.dshape_of(groups)
defarr_groups[0].write(json.dumps({
'dshape': dshape_groups,
'command': 'groupby.groups',
'params': {
'fields': fields
}
}))
defarr_groups[0].close()
content_type = 'application/json; charset=utf-8'
body = json.dumps({
'session': self.base_url + self.session_name,
'output_gb': self.base_url + defarr_gb[1],
'dshape_gb': dshape_gb,
'output_groups': self.base_url + defarr_groups[1],
'dshape_groups': dshape_groups
})
return (content_type, body)
def add_computed_fields(self, json_cmd):
print('Adding computed fields')
cmd = json.loads(json_cmd)
array_url = cmd.get('input', self.base_url + self.array_name)
if not array_url.startswith(self.base_url):
raise RuntimeError('Input array must start with the base url')
array_name = array_url[len(self.base_url):]
fields = cmd['fields']
rm_fields = cmd.get('rm_fields', [])
fnname = cmd.get('fnname', None)
arr = self.get_session_array(array_name)._data.dynd_arr()
res = nd.add_computed_fields(arr, fields, rm_fields, fnname)
defarr = self.array_provider.create_deferred_array_filename(
self.session_name, 'computed_fields_', array(res))
dshape = nd.dshape_of(res)
defarr[0].write(json.dumps({
'dshape': dshape,
'command': 'add_computed_fields',
'params': {
'fields': fields,
'rm_fields': rm_fields,
'fnname': fnname
}
}))
defarr[0].close()
content_type = 'application/json; charset=utf-8'
body = json.dumps({
'session': self.base_url + self.session_name,
'output': self.base_url + defarr[1],
'dshape': dshape
})
return (content_type, body)
def make_computed_fields(self, json_cmd):
print('Adding computed fields')
cmd = json.loads(json_cmd)
array_url = cmd.get('input', self.base_url + self.array_name)
if not array_url.startswith(self.base_url):
raise RuntimeError('Input array must start with the base url')
array_name = array_url[len(self.base_url):]
fields = cmd['fields']
replace_undim = cmd.get('replace_undim', 0)
fnname = cmd.get('fnname', None)
arr = self.get_session_array(array_name)._data.dynd_arr()
res = nd.make_computed_fields(arr, replace_undim, fields, fnname)
defarr = self.array_provider.create_deferred_array_filename(
self.session_name, 'computed_fields_', array(res))
dshape = nd.dshape_of(res)
defarr[0].write(json.dumps({
'dshape': dshape,
'command': 'make_computed_fields',
'params': {
'fields': fields,
'replace_undim': replace_undim,
'fnname': fnname
}
}))
defarr[0].close()
content_type = 'application/json; charset=utf-8'
body = json.dumps({
'session': self.base_url + self.session_name,
'output': self.base_url + defarr[1],
'dshape': dshape
})
return (content_type, body)
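# --- Illustration (not part of the original module) ---
# A hedged sketch of the JSON command bodies that sort() and groupby() above
# parse; the URL and field names are hypothetical placeholders.
if __name__ == '__main__':
    example_sort_cmd = json.dumps({
        'input': 'http://localhost:8080/session_x/loans',  # optional override
        'field': 'loan_amount',
    })
    example_groupby_cmd = json.dumps({'fields': ['sector', 'country_code']})
    print(example_sort_cmd)
    print(example_groupby_cmd)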
| {
"repo_name": "xsixing/blaze",
"path": "blaze/io/server/compute_session.py",
"copies": "3",
"size": "7976",
"license": "bsd-3-clause",
"hash": 8057779961791861000,
"line_mean": 36.980952381,
"line_max": 75,
"alpha_frac": 0.5352306921,
"autogenerated": false,
"ratio": 3.8531400966183575,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0007285876846152695,
"num_lines": 210
} |
from __future__ import (absolute_import, division, print_function)
import json
from twisted.internet.protocol import Factory, Protocol, connectionDone
from twisted.internet import task, threads, defer
from danex import _dane, _tls
class DaneDoctorProtocol(Protocol):
def dataReceived(self, data):
domain = data.strip(" \n")
port = 443
proto = "tcp"
d = defer.gatherResults([
threads.deferToThread(
_dane.lookup_tlsa_records, domain, port, proto
),
_tls.retrieveCertificate(domain, port)
])
def onResults(res):
(trusted, tlsaRecords), serverCertificate = res
numRecs = len(tlsaRecords)
doesMatch = False
recs = []
for tlsa in tlsaRecords:
newRec = {
"usage": tlsa.usage.name,
"selector": tlsa.selector.name,
"matchingType": tlsa.matchingType.name,
"errors": tlsa.errors,
"valid": tlsa.valid,
}
if tlsa.valid and tlsa.matchesCertificate(serverCertificate):
newRec["matches"] = doesMatch = True
recs.append(newRec)
rv = {
"trusted": trusted,
"doesMatch": doesMatch,
"numRecs": numRecs,
"tlsaRecords": recs,
}
self.transport.write(json.dumps(rv))
self.transport.loseConnection()
d.addCallback(onResults)
return d
| {
"repo_name": "hynek/tnw",
"path": "dane_doctor/protocol.py",
"copies": "1",
"size": "1597",
"license": "mit",
"hash": -3885106744660154400,
"line_mean": 28.5740740741,
"line_max": 77,
"alpha_frac": 0.5272385723,
"autogenerated": false,
"ratio": 4.4484679665738165,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5475706538873817,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import json
import pytest
import stripe
class TestListObject(object):
@pytest.fixture
def list_object(self):
return stripe.ListObject.construct_from(
{"object": "list", "url": "/my/path", "data": ["foo"]}, "mykey"
)
def test_list(self, request_mock, list_object):
request_mock.stub_request(
"get",
"/my/path",
{"object": "list", "data": [{"object": "charge", "foo": "bar"}]},
)
res = list_object.list(myparam="you", stripe_account="acct_123")
request_mock.assert_requested(
"get", "/my/path", {"myparam": "you"}, None
)
assert isinstance(res, stripe.ListObject)
assert res.stripe_account == "acct_123"
assert isinstance(res.data, list)
assert isinstance(res.data[0], stripe.Charge)
assert res.data[0].foo == "bar"
def test_create(self, request_mock, list_object):
request_mock.stub_request(
"post", "/my/path", {"object": "charge", "foo": "bar"}
)
res = list_object.create(myparam="eter", stripe_account="acct_123")
request_mock.assert_requested(
"post", "/my/path", {"myparam": "eter"}, None
)
assert isinstance(res, stripe.Charge)
assert res.foo == "bar"
assert res.stripe_account == "acct_123"
def test_create_maintains_list_properties(self, request_mock, list_object):
# Testing with real requests because our mock makes it impossible to
# test otherwise
charge = stripe.Charge.retrieve("ch_123", api_key="sk_test_custom")
res = charge.refunds.create(amount=123)
request_mock.assert_requested(
"post", "/v1/charges/ch_123/refunds", {"amount": 123}, None
)
assert res.api_key == "sk_test_custom"
def test_retrieve(self, request_mock, list_object):
request_mock.stub_request(
"get", "/my/path/myid", {"object": "charge", "foo": "bar"}
)
res = list_object.retrieve(
"myid", myparam="cow", stripe_account="acct_123"
)
request_mock.assert_requested(
"get", "/my/path/myid", {"myparam": "cow"}, None
)
assert isinstance(res, stripe.Charge)
assert res.foo == "bar"
assert res.stripe_account == "acct_123"
def test_is_empty(self):
lo = stripe.ListObject.construct_from({"data": []}, None)
assert lo.is_empty is True
def test_empty_list(self):
lo = stripe.ListObject.empty_list()
assert lo.is_empty
def test_iter(self):
arr = [{"id": 1}, {"id": 2}, {"id": 3}]
expected = stripe.util.convert_to_stripe_object(arr)
lo = stripe.ListObject.construct_from({"data": arr}, None)
assert list(lo) == expected
def test_iter_reversed(self):
arr = [{"id": 1}, {"id": 2}, {"id": 3}]
expected = stripe.util.convert_to_stripe_object(list(reversed(arr)))
lo = stripe.ListObject.construct_from({"data": arr}, None)
assert list(reversed(lo)) == expected
def test_len(self, list_object):
assert len(list_object) == 1
def test_bool(self, list_object):
assert list_object
empty = stripe.ListObject.construct_from(
{"object": "list", "url": "/my/path", "data": []}, "mykey"
)
assert bool(empty) is False
def test_next_page(self, request_mock):
lo = stripe.ListObject.construct_from(
{
"object": "list",
"data": [{"id": 1}],
"has_more": True,
"url": "/things",
},
None,
)
request_mock.stub_request(
"get",
"/things",
{
"object": "list",
"data": [{"id": 2}],
"has_more": False,
"url": "/things",
},
)
next_lo = lo.next_page()
assert not next_lo.is_empty
assert next_lo.data[0].id == 2
def test_next_page_with_filters(self, request_mock):
lo = stripe.ListObject.construct_from(
{
"object": "list",
"data": [{"id": 1}],
"has_more": True,
"url": "/things",
},
None,
)
lo._retrieve_params = {"expand": ["data.source"], "limit": 3}
request_mock.stub_request(
"get",
"/things",
{
"object": "list",
"data": [{"id": 2}],
"has_more": False,
"url": "/things",
},
)
next_lo = lo.next_page()
assert next_lo._retrieve_params == {
"expand": ["data.source"],
"limit": 3,
"starting_after": 1,
}
def test_next_page_empty_list(self):
lo = stripe.ListObject.construct_from(
{
"object": "list",
"data": [{"id": 1}],
"has_more": False,
"url": "/things",
},
None,
)
next_lo = lo.next_page()
assert next_lo == stripe.ListObject.empty_list()
def test_prev_page(self, request_mock):
lo = stripe.ListObject.construct_from(
{
"object": "list",
"data": [{"id": 2}],
"has_more": True,
"url": "/things",
},
None,
)
request_mock.stub_request(
"get",
"/things",
{
"object": "list",
"data": [{"id": 1}],
"has_more": False,
"url": "/things",
},
)
previous_lo = lo.previous_page()
assert not previous_lo.is_empty
assert previous_lo.data[0].id == 1
def test_prev_page_with_filters(self, request_mock):
lo = stripe.ListObject.construct_from(
{
"object": "list",
"data": [{"id": 2}],
"has_more": True,
"url": "/things",
},
None,
)
lo._retrieve_params = {"expand": ["data.source"], "limit": 3}
request_mock.stub_request(
"get",
"/things",
{
"object": "list",
"data": [{"id": 1}],
"has_more": False,
"url": "/things",
},
)
previous_lo = lo.previous_page()
assert previous_lo._retrieve_params == {
"expand": ["data.source"],
"limit": 3,
"ending_before": 2,
}
def test_serialize_empty_list(self):
empty = stripe.ListObject.construct_from(
{"object": "list", "data": []}, "mykey"
)
serialized = str(empty)
deserialized = stripe.ListObject.construct_from(
json.loads(serialized), "mykey"
)
assert deserialized == empty
def test_serialize_nested_empty_list(self):
empty = stripe.ListObject.construct_from(
{"object": "list", "data": []}, "mykey"
)
obj = stripe.stripe_object.StripeObject.construct_from(
{"nested": empty}, "mykey"
)
serialized = str(obj)
deserialized = stripe.stripe_object.StripeObject.construct_from(
json.loads(serialized), "mykey"
)
assert deserialized.nested == empty
class TestAutoPaging:
@staticmethod
def pageable_model_response(ids, has_more):
return {
"object": "list",
"url": "/v1/pageablemodels",
"data": [{"id": id, "object": "pageablemodel"} for id in ids],
"has_more": has_more,
}
def test_iter_one_page(self, request_mock):
lo = stripe.ListObject.construct_from(
self.pageable_model_response(["pm_123", "pm_124"], False), "mykey"
)
request_mock.assert_no_request()
seen = [item["id"] for item in lo.auto_paging_iter()]
assert seen == ["pm_123", "pm_124"]
def test_iter_two_pages(self, request_mock):
lo = stripe.ListObject.construct_from(
self.pageable_model_response(["pm_123", "pm_124"], True), "mykey"
)
lo._retrieve_params = {"foo": "bar"}
request_mock.stub_request(
"get",
"/v1/pageablemodels",
self.pageable_model_response(["pm_125", "pm_126"], False),
)
seen = [item["id"] for item in lo.auto_paging_iter()]
request_mock.assert_requested(
"get",
"/v1/pageablemodels",
{"starting_after": "pm_124", "foo": "bar"},
None,
)
assert seen == ["pm_123", "pm_124", "pm_125", "pm_126"]
def test_iter_reverse(self, request_mock):
lo = stripe.ListObject.construct_from(
self.pageable_model_response(["pm_125", "pm_126"], True), "mykey"
)
lo._retrieve_params = {"foo": "bar", "ending_before": "pm_127"}
request_mock.stub_request(
"get",
"/v1/pageablemodels",
self.pageable_model_response(["pm_123", "pm_124"], False),
)
seen = [item["id"] for item in lo.auto_paging_iter()]
request_mock.assert_requested(
"get",
"/v1/pageablemodels",
{"ending_before": "pm_125", "foo": "bar"},
None,
)
assert seen == ["pm_126", "pm_125", "pm_124", "pm_123"]
def test_class_method_two_pages(self, request_mock):
request_mock.stub_request(
"get",
"/v1/charges",
{
"object": "list",
"data": [{"id": "ch_001"}],
"url": "/v1/charges",
"has_more": False,
},
)
seen = [
item["id"]
for item in stripe.Charge.auto_paging_iter(limit=25, foo="bar")
]
request_mock.assert_requested(
"get", "/v1/charges", {"limit": 25, "foo": "bar"}
)
assert seen == ["ch_001"]
| {
"repo_name": "stripe/stripe-python",
"path": "tests/api_resources/test_list_object.py",
"copies": "1",
"size": "10295",
"license": "mit",
"hash": 4120485119183714000,
"line_mean": 29.0145772595,
"line_max": 79,
"alpha_frac": 0.4842156387,
"autogenerated": false,
"ratio": 3.7974917004795277,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9781707339179528,
"avg_score": 0,
"num_lines": 343
} |
from __future__ import absolute_import, division, print_function
import json
import stripe
from stripe import six, util
from stripe.stripe_response import StripeResponse, StripeStreamResponse
class RequestMock(object):
def __init__(self, mocker):
self._mocker = mocker
self._real_request = stripe.api_requestor.APIRequestor.request
self._real_request_stream = (
stripe.api_requestor.APIRequestor.request_stream
)
self._stub_request_handler = StubRequestHandler()
self.constructor_patcher = self._mocker.patch(
"stripe.api_requestor.APIRequestor.__init__",
side_effect=stripe.api_requestor.APIRequestor.__init__,
autospec=True,
)
self.request_patcher = self._mocker.patch(
"stripe.api_requestor.APIRequestor.request",
side_effect=self._patched_request,
autospec=True,
)
self.request_stream_patcher = self._mocker.patch(
"stripe.api_requestor.APIRequestor.request_stream",
side_effect=self._patched_request_stream,
autospec=True,
)
def _patched_request(self, requestor, method, url, *args, **kwargs):
response = self._stub_request_handler.get_response(
method, url, expect_stream=False
)
if response is not None:
return response, stripe.api_key
return self._real_request(requestor, method, url, *args, **kwargs)
def _patched_request_stream(self, requestor, method, url, *args, **kwargs):
response = self._stub_request_handler.get_response(
method, url, expect_stream=True
)
if response is not None:
return response, stripe.api_key
return self._real_request_stream(
requestor, method, url, *args, **kwargs
)
def stub_request(self, method, url, rbody={}, rcode=200, rheaders={}):
self._stub_request_handler.register(
method, url, rbody, rcode, rheaders, is_streaming=False
)
def stub_request_stream(
self, method, url, rbody={}, rcode=200, rheaders={}
):
self._stub_request_handler.register(
method, url, rbody, rcode, rheaders, is_streaming=True
)
def assert_api_base(self, expected_api_base):
# Note that this method only checks that an API base was provided
# as a keyword argument in APIRequestor's constructor, not as a
# positional argument.
if "api_base" not in self.constructor_patcher.call_args[1]:
msg = (
"Expected APIRequestor to have been constructed with "
"api_base='%s'. No API base was provided." % expected_api_base
)
raise AssertionError(msg)
actual_api_base = self.constructor_patcher.call_args[1]["api_base"]
if actual_api_base != expected_api_base:
msg = (
"Expected APIRequestor to have been constructed with "
"api_base='%s'. Constructed with api_base='%s' "
"instead." % (expected_api_base, actual_api_base)
)
raise AssertionError(msg)
def assert_api_version(self, expected_api_version):
# Note that this method only checks that an API version was provided
# as a keyword argument in APIRequestor's constructor, not as a
# positional argument.
if "api_version" not in self.constructor_patcher.call_args[1]:
msg = (
"Expected APIRequestor to have been constructed with "
"api_version='%s'. No API version was provided."
% expected_api_version
)
raise AssertionError(msg)
actual_api_version = self.constructor_patcher.call_args[1][
"api_version"
]
if actual_api_version != expected_api_version:
msg = (
"Expected APIRequestor to have been constructed with "
"api_version='%s'. Constructed with api_version='%s' "
"instead." % (expected_api_version, actual_api_version)
)
raise AssertionError(msg)
def assert_requested(self, method, url, params=None, headers=None):
self.assert_requested_internal(
self.request_patcher, method, url, params, headers
)
def assert_requested_stream(self, method, url, params=None, headers=None):
self.assert_requested_internal(
self.request_stream_patcher, method, url, params, headers
)
def assert_requested_internal(self, patcher, method, url, params, headers):
params = params or self._mocker.ANY
headers = headers or self._mocker.ANY
called = False
exception = None
# Sadly, ANY does not match a missing optional argument, so we
# check all the possible signatures of the request method
possible_called_args = [
(self._mocker.ANY, method, url),
(self._mocker.ANY, method, url, params),
(self._mocker.ANY, method, url, params, headers),
]
for args in possible_called_args:
try:
patcher.assert_called_with(*args)
except AssertionError as e:
exception = e
else:
called = True
break
if not called:
raise exception
def assert_no_request(self):
if self.request_patcher.call_count != 0:
msg = (
"Expected 'request' to not have been called. "
"Called %s times." % (self.request_patcher.call_count)
)
raise AssertionError(msg)
def assert_no_request_stream(self):
if self.request_stream_patcher.call_count != 0:
msg = (
"Expected 'request_stream' to not have been called. "
"Called %s times." % (self.request_stream_patcher.call_count)
)
raise AssertionError(msg)
def reset_mock(self):
self.request_patcher.reset_mock()
self.request_stream_patcher.reset_mock()
class StubRequestHandler(object):
def __init__(self):
self._entries = {}
def register(
self, method, url, rbody={}, rcode=200, rheaders={}, is_streaming=False
):
self._entries[(method, url)] = (rbody, rcode, rheaders, is_streaming)
def get_response(self, method, url, expect_stream=False):
if (method, url) in self._entries:
rbody, rcode, rheaders, is_streaming = self._entries.pop(
(method, url)
)
if expect_stream != is_streaming:
return None
if not isinstance(rbody, six.string_types):
rbody = json.dumps(rbody)
if is_streaming:
stripe_response = StripeStreamResponse(
util.io.BytesIO(str.encode(rbody)), rcode, rheaders
)
else:
stripe_response = StripeResponse(rbody, rcode, rheaders)
return stripe_response
return None
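# --- Usage sketch (not part of the original module) ---
# A standalone illustration of the StubRequestHandler round trip: register a
# canned response, then fetch it the way _patched_request() does. The charge
# id is a made-up placeholder.
if __name__ == '__main__':
    handler = StubRequestHandler()
    handler.register('get', '/v1/charges/ch_123', {'id': 'ch_123'}, 200, {})
    response = handler.get_response('get', '/v1/charges/ch_123')
    print(response.code, response.body)  # e.g. 200 {"id": "ch_123"}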
| {
"repo_name": "stripe/stripe-python",
"path": "tests/request_mock.py",
"copies": "1",
"size": "7167",
"license": "mit",
"hash": 9179226620747470000,
"line_mean": 34.835,
"line_max": 79,
"alpha_frac": 0.5826705735,
"autogenerated": false,
"ratio": 4.223335297583971,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5306005871083971,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import json
class ManifestError(Exception):
"""Something is wrong with the manifest format."""
pass
def _make_property(name):
@property
def getter(self):
return self._data.get(name)
return getter
_required_keys = {'qss_file', 'theme_name', 'version', 'author'}
class ThemeManifest(object):
"""Theme manifest. Read access to basic info about themes."""
def __init__(self, file):
try:
self._data = json.load(file)
except (TypeError, ValueError) as exc:
raise ManifestError('Bad manifest: ' + str(exc))
missing_keys = _required_keys - set(self._data.keys())
if missing_keys:
raise ManifestError(
'Bad manifest: missing required key(s): {}'.format(
', '.join(missing_keys)
)
)
# Required.
theme_name = _make_property('theme_name')
author = _make_property('author')
version = _make_property('version')
qss_file = _make_property('qss_file')
# Optional.
preview_image = _make_property('preview_image')
notes = _make_property('notes')
clr_file = _make_property('clr_file')
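# --- Usage sketch (not part of the original module) ---
# A hedged example that parses a manifest from an in-memory JSON document
# instead of a real theme file; all field values are made up.
if __name__ == '__main__':
    import io
    manifest_file = io.StringIO(u'{"theme_name": "Example Dark", '
                                u'"author": "Jane Doe", "version": "1.0", '
                                u'"qss_file": "stylesheet.qss"}')
    manifest = ThemeManifest(manifest_file)
    print(manifest.theme_name, manifest.version, manifest.qss_file)
    print(manifest.notes)  # optional keys simply return None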
| {
"repo_name": "zyantific/IDASkins",
"path": "plugins/idaskins/thememanifest.py",
"copies": "1",
"size": "1247",
"license": "mit",
"hash": -1688466534349475800,
"line_mean": 25.5319148936,
"line_max": 67,
"alpha_frac": 0.5942261427,
"autogenerated": false,
"ratio": 3.909090909090909,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9996668115620696,
"avg_score": 0.0013297872340425532,
"num_lines": 47
} |
from __future__ import absolute_import, division, print_function
import linecache
import logging
import os.path
import sys
def get_exception_source():
'''Returns full file path, file name, line number, function name, and line contents
causing the last exception.'''
_, _, tb = sys.exc_info()
while tb.tb_next:
tb = tb.tb_next
f = tb.tb_frame
lineno = tb.tb_lineno
co = f.f_code
filefullpath = co.co_filename
filename = os.path.basename(filefullpath)
name = co.co_name
linecache.checkcache(filefullpath)
line = linecache.getline(filefullpath, lineno, f.f_globals)
if line: line = line.strip()
else: line = None
return filefullpath, filename, lineno, name, line
class CallbackHandler(logging.Handler):
'''This handler sends logrecords to a callback function.'''
def __init__(self, callback):
'''Set up a handler instance, record the callback function.'''
super(CallbackHandler, self).__init__()
self._callback = callback
def prepare(self, record):
# Function taken from Python 3.6 QueueHandler
"""
Prepares a record for queuing. The object returned by this method is
enqueued.
The base implementation formats the record to merge the message
and arguments, and removes unpickleable items from the record
in-place.
You might want to override this method if you want to convert
the record to a dict or JSON string, or send a modified copy
of the record while leaving the original intact.
"""
# The format operation gets traceback text into record.exc_text
# (if there's exception data), and also puts the message into
# record.message. We can then use this to replace the original
# msg + args, as these might be unpickleable. We also zap the
# exc_info attribute, as it's no longer needed and, if not None,
# will typically not be pickleable.
self.format(record)
record.msg = record.message
record.args = None
record.exc_info = None
return record
def emit(self, record):
'''Send a LogRecord to the callback function, after preparing it
for serialization.'''
try:
self._callback(self.prepare(record))
except Exception:
self.handleError(record)
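# --- Usage sketch (not part of the original module) ---
# A minimal example wiring CallbackHandler to a plain print callback and
# exercising get_exception_source(); the logger name is arbitrary.
if __name__ == '__main__':
    demo_log = logging.getLogger('workflows.logging.demo')
    demo_log.setLevel(logging.INFO)
    demo_log.addHandler(CallbackHandler(lambda record: print(record.msg)))
    demo_log.info('callback handlers receive prepared LogRecords')
    try:
        1 / 0
    except ZeroDivisionError:
        print(get_exception_source())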
| {
"repo_name": "xia2/workflows",
"path": "workflows/logging/__init__.py",
"copies": "1",
"size": "2221",
"license": "bsd-3-clause",
"hash": 7995685854873166000,
"line_mean": 33.703125,
"line_max": 85,
"alpha_frac": 0.7019360648,
"autogenerated": false,
"ratio": 4.052919708029197,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5254855772829197,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import linecache
import sys
import warnings
import pytest
from characteristic import (
Attribute,
NOTHING,
PY26,
_attrs_to_script,
_ensure_attributes,
attributes,
immutable,
with_cmp,
with_init,
with_repr,
)
PY2 = sys.version_info[0] == 2
warnings.simplefilter("always")
class TestAttribute(object):
def test_init_simple(self):
"""
Instantiating with just the name initializes properly.
"""
a = Attribute("foo")
assert "foo" == a.name
assert NOTHING is a.default_value
def test_init_default_factory(self):
"""
Instantiating with default_factory creates a proper descriptor for
_default.
"""
a = Attribute("foo", default_factory=list)
assert NOTHING is a.default_value
assert list() == a.default_factory()
def test_init_default_value(self):
"""
Instantiating with default_value initializes default properly.
"""
a = Attribute("foo", default_value="bar")
assert "bar" == a.default_value
def test_ambiguous_defaults(self):
"""
Instantiating with both default_value and default_factory raises
ValueError.
"""
with pytest.raises(ValueError):
Attribute(
"foo",
default_value="bar",
default_factory=lambda: 42
)
def test_missing_attr(self):
"""
Accessing inexistent attributes still raises an AttributeError.
"""
a = Attribute("foo")
with pytest.raises(AttributeError):
a.bar
def test_alias(self):
"""
If an attribute with a leading _ is defined, the initializer keyword
is stripped of it.
"""
a = Attribute("_private")
assert "private" == a._kw_name
def test_non_alias(self):
"""
        The keyword name of a non-private attribute is left unchanged.
"""
a = Attribute("public")
assert "public" == a._kw_name
def test_dunder(self):
"""
Dunder gets all _ stripped.
"""
a = Attribute("__very_private")
assert "very_private" == a._kw_name
def test_init_aliaser_none(self):
"""
No aliasing if init_aliaser is None.
"""
a = Attribute("_private", init_aliaser=None)
assert a.name == a._kw_name
def test_init_aliaser(self):
"""
Any callable works for aliasing.
"""
a = Attribute("a", init_aliaser=lambda _: "foo")
assert "foo" == a._kw_name
def test_repr(self):
"""
repr returns the correct string.
"""
a = Attribute(
name="name",
exclude_from_cmp=True,
exclude_from_init=True,
exclude_from_repr=True,
exclude_from_immutable=True,
default_value=42,
instance_of=str,
init_aliaser=None
)
assert (
"<Attribute(name='name', exclude_from_cmp=True, "
"exclude_from_init=True, exclude_from_repr=True, "
"exclude_from_immutable=True, "
"default_value=42, default_factory=None, instance_of=<{0} 'str'>,"
" init_aliaser=None)>"
).format("type" if PY2 else "class") == repr(a)
def test_eq_different_types(self):
"""
Comparing Attribute with something else returns NotImplemented.
"""
assert NotImplemented == Attribute(name="name").__eq__(None)
def test_eq_equal(self):
"""
Equal Attributes are detected equal.
"""
kw = {
"name": "name",
"exclude_from_cmp": True,
"exclude_from_init": False,
"exclude_from_repr": True,
"exclude_from_immutable": False,
"default_value": 42,
"instance_of": int,
}
assert Attribute(**kw) == Attribute(**kw)
def test_eq_unequal(self):
"""
        Unequal Attributes are detected as unequal.
"""
kw = {
"name": "name",
"exclude_from_cmp": True,
"exclude_from_init": False,
"exclude_from_repr": True,
"exclude_from_immutable": False,
"default_value": 42,
"instance_of": int,
}
for arg in kw.keys():
kw_mutated = dict(**kw)
kw_mutated[arg] = "mutated"
assert Attribute(**kw) != Attribute(**kw_mutated)
@with_cmp(["a", "b"])
class CmpC(object):
def __init__(self, a, b):
self.a = a
self.b = b
class TestWithCmp(object):
def test_equal(self):
"""
Equal objects are detected as equal.
"""
assert CmpC(1, 2) == CmpC(1, 2)
assert not (CmpC(1, 2) != CmpC(1, 2))
def test_unequal_same_class(self):
"""
Unequal objects of correct type are detected as unequal.
"""
assert CmpC(1, 2) != CmpC(2, 1)
assert not (CmpC(1, 2) == CmpC(2, 1))
def test_unequal_different_class(self):
"""
Unequal objects of different type are detected even if their attributes
match.
"""
class NotCmpC(object):
a = 1
b = 2
assert CmpC(1, 2) != NotCmpC()
assert not (CmpC(1, 2) == NotCmpC())
@pytest.mark.parametrize(
"a,b", [
((1, 2), (2, 1)),
((1, 2), (1, 3)),
(("a", "b"), ("b", "a")),
]
)
def test_lt(self, a, b):
"""
__lt__ compares objects as tuples of attribute values.
"""
assert CmpC(*a) < CmpC(*b)
def test_lt_unordable(self):
"""
__lt__ returns NotImplemented if classes differ.
"""
assert NotImplemented == (CmpC(1, 2).__lt__(42))
@pytest.mark.parametrize(
"a,b", [
((1, 2), (2, 1)),
((1, 2), (1, 3)),
((1, 1), (1, 1)),
(("a", "b"), ("b", "a")),
(("a", "b"), ("a", "b")),
]
)
def test_le(self, a, b):
"""
__le__ compares objects as tuples of attribute values.
"""
assert CmpC(*a) <= CmpC(*b)
def test_le_unordable(self):
"""
__le__ returns NotImplemented if classes differ.
"""
assert NotImplemented == (CmpC(1, 2).__le__(42))
@pytest.mark.parametrize(
"a,b", [
((2, 1), (1, 2)),
((1, 3), (1, 2)),
(("b", "a"), ("a", "b")),
]
)
def test_gt(self, a, b):
"""
__gt__ compares objects as tuples of attribute values.
"""
assert CmpC(*a) > CmpC(*b)
def test_gt_unordable(self):
"""
__gt__ returns NotImplemented if classes differ.
"""
assert NotImplemented == (CmpC(1, 2).__gt__(42))
@pytest.mark.parametrize(
"a,b", [
((2, 1), (1, 2)),
((1, 3), (1, 2)),
((1, 1), (1, 1)),
(("b", "a"), ("a", "b")),
(("a", "b"), ("a", "b")),
]
)
def test_ge(self, a, b):
"""
__ge__ compares objects as tuples of attribute values.
"""
assert CmpC(*a) >= CmpC(*b)
def test_ge_unordable(self):
"""
__ge__ returns NotImplemented if classes differ.
"""
assert NotImplemented == (CmpC(1, 2).__ge__(42))
def test_hash(self):
"""
__hash__ returns different hashes for different values.
"""
assert hash(CmpC(1, 2)) != hash(CmpC(1, 1))
def test_Attribute_exclude_from_cmp(self):
"""
Ignores attribute if exclude_from_cmp=True.
"""
@with_cmp([Attribute("a", exclude_from_cmp=True), "b"])
class C(object):
def __init__(self, a, b):
self.a = a
self.b = b
assert C(42, 1) == C(23, 1)
@with_repr(["a", "b"])
class ReprC(object):
def __init__(self, a, b):
self.a = a
self.b = b
class TestReprAttrs(object):
def test_repr(self):
"""
Test repr returns a sensible value.
"""
assert "<ReprC(a=1, b=2)>" == repr(ReprC(1, 2))
def test_Attribute_exclude_from_repr(self):
"""
Ignores attribute if exclude_from_repr=True.
"""
@with_repr([Attribute("a", exclude_from_repr=True), "b"])
class C(object):
def __init__(self, a, b):
self.a = a
self.b = b
assert "<C(b=2)>" == repr(C(1, 2))
@with_init([Attribute("a"), Attribute("b")])
class InitC(object):
def __init__(self):
if self.a == self.b:
raise ValueError
class TestWithInit(object):
def test_sets_attributes(self):
"""
The attributes are initialized using the passed keywords.
"""
obj = InitC(a=1, b=2)
assert 1 == obj.a
assert 2 == obj.b
def test_custom_init(self):
"""
The class initializer is called too.
"""
with pytest.raises(ValueError):
InitC(a=1, b=1)
def test_passes_args(self):
"""
All positional parameters are passed to the original initializer.
"""
@with_init(["a"])
class InitWithArg(object):
def __init__(self, arg):
self.arg = arg
obj = InitWithArg(42, a=1)
assert 42 == obj.arg
assert 1 == obj.a
def test_passes_remaining_kw(self):
"""
Keyword arguments that aren't used for attributes are passed to the
original initializer.
"""
@with_init(["a"])
class InitWithKWArg(object):
def __init__(self, kw_arg=None):
self.kw_arg = kw_arg
obj = InitWithKWArg(a=1, kw_arg=42)
assert 42 == obj.kw_arg
assert 1 == obj.a
def test_does_not_pass_attrs(self):
"""
The attributes are removed from the keyword arguments before they are
passed to the original initializer.
"""
@with_init(["a"])
class InitWithKWArgs(object):
def __init__(self, **kw):
assert "a" not in kw
assert "b" in kw
InitWithKWArgs(a=1, b=42)
def test_defaults(self):
"""
If defaults are passed, they are used as fallback.
"""
@with_init(["a", "b"], defaults={"b": 2})
class InitWithDefaults(object):
pass
obj = InitWithDefaults(a=1)
assert 2 == obj.b
def test_missing_arg(self):
"""
Raises `ValueError` if a value isn't passed.
"""
with pytest.raises(ValueError) as e:
InitC(a=1)
assert "Missing keyword value for 'b'." == e.value.args[0]
def test_defaults_conflict(self):
"""
Raises `ValueError` if both defaults and an Attribute are passed.
"""
with pytest.raises(ValueError) as e:
@with_init([Attribute("a")], defaults={"a": 42})
class C(object):
pass
assert (
"Mixing of the 'defaults' keyword argument and passing instances "
"of Attribute for 'attrs' is prohibited. Please don't use "
"'defaults' anymore, it has been deprecated in 14.0."
== e.value.args[0]
)
def test_attribute(self):
"""
String attributes are converted to Attributes and thus work.
"""
@with_init(["a"])
class C(object):
pass
o = C(a=1)
assert 1 == o.a
def test_default_factory(self):
"""
The default factory is used for each instance of missing keyword
argument.
"""
@with_init([Attribute("a", default_factory=list)])
class C(object):
pass
o1 = C()
o2 = C()
assert o1.a is not o2.a
def test_underscores(self):
"""
with_init takes keyword aliasing into account.
"""
@with_init([Attribute("_a")])
class C(object):
pass
c = C(a=1)
assert 1 == c._a
def test_plain_no_alias(self):
"""
str-based attributes don't get aliased for backward-compatibility.
"""
@with_init(["_a"])
class C(object):
pass
c = C(_a=1)
assert 1 == c._a
def test_instance_of_fail(self):
"""
        Raise `TypeError` if an Attribute with an `instance_of` is attempted
        to be set to a mismatched type.
"""
@with_init([Attribute("a", instance_of=int)])
class C(object):
pass
with pytest.raises(TypeError) as e:
C(a="not an int!")
assert (
"Attribute 'a' must be an instance of 'int'."
== e.value.args[0]
)
def test_instance_of_success(self):
"""
Setting an attribute to a value that doesn't conflict with an
`instance_of` declaration works.
"""
@with_init([Attribute("a", instance_of=int)])
class C(object):
pass
c = C(a=42)
assert 42 == c.a
def test_Attribute_exclude_from_init(self):
"""
Ignores attribute if exclude_from_init=True.
"""
@with_init([Attribute("a", exclude_from_init=True), "b"])
class C(object):
pass
C(b=1)
def test_deprecation_defaults(self):
"""
Emits a DeprecationWarning if `defaults` is used.
"""
with warnings.catch_warnings(record=True) as w:
@with_init(["a"], defaults={"a": 42})
class C(object):
pass
assert (
'`defaults` has been deprecated in 14.0, please use the '
'`Attribute` class instead.'
) == w[0].message.args[0]
assert issubclass(w[0].category, DeprecationWarning)
def test_linecache(self):
"""
The created init method is added to the linecache so PDB shows it
properly.
"""
attrs = [Attribute("a")]
@with_init(attrs)
class C(object):
pass
assert isinstance(linecache.cache[C.__init__.__code__.co_filename],
tuple)
def test_linecache_attrs_unique(self):
"""
If the attributes are the same, only one linecache entry is created.
Since the key within the cache is the filename, this effectively means
that the filenames must be equal if the attributes are equal.
"""
attrs = [Attribute("a")]
@with_init(attrs[:])
class C1(object):
pass
@with_init(attrs[:])
class C2(object):
pass
assert (
C1.__init__.__code__.co_filename
== C2.__init__.__code__.co_filename
)
def test_linecache_different_attrs(self):
"""
Different Attributes have different generated filenames.
"""
@with_init([Attribute("a")])
class C1(object):
pass
@with_init([Attribute("b")])
class C2(object):
pass
assert (
C1.__init__.__code__.co_filename
!= C2.__init__.__code__.co_filename
)
def test_no_attributes(self):
"""
Specifying no attributes doesn't raise an exception.
"""
@with_init([])
class C(object):
pass
C()
class TestAttributes(object):
def test_leaves_init_alone(self):
"""
If *apply_with_init* or *create_init* is `False`, leave __init__ alone.
"""
@attributes(["a"], apply_with_init=False)
class C(object):
pass
@attributes(["a"], create_init=False)
class CDeprecated(object):
pass
obj1 = C()
obj2 = CDeprecated()
with pytest.raises(AttributeError):
obj1.a
with pytest.raises(AttributeError):
obj2.a
def test_wraps_init(self):
"""
If *create_init* is `True`, build initializer.
"""
@attributes(["a", "b"], apply_with_init=True)
class C(object):
pass
obj = C(a=1, b=2)
assert 1 == obj.a
assert 2 == obj.b
def test_immutable(self):
"""
If *apply_immutable* is `True`, make class immutable.
"""
@attributes(["a"], apply_immutable=True)
class ImmuClass(object):
pass
obj = ImmuClass(a=42)
with pytest.raises(AttributeError):
obj.a = "23"
def test_apply_with_cmp(self):
"""
Don't add cmp methods if *apply_with_cmp* is `False`.
"""
@attributes(["a"], apply_with_cmp=False)
class C(object):
pass
obj = C(a=1)
if PY2:
assert None is getattr(obj, "__eq__", None)
else:
assert object.__eq__ == C.__eq__
def test_apply_with_repr(self):
"""
Don't add __repr__ if *apply_with_repr* is `False`.
"""
@attributes(["a"], apply_with_repr=False)
class C(object):
pass
assert repr(C(a=1)).startswith("<test_characteristic.")
def test_store_attributes(self):
"""
store_attributes is called on the class to store the attributes that
were passed in.
"""
attrs = [Attribute("a"), Attribute("b")]
@attributes(
attrs, store_attributes=lambda cls, a: setattr(cls, "foo", a)
)
class C(object):
pass
assert C.foo == attrs
def test_store_attributes_stores_Attributes(self):
"""
The attributes passed to store_attributes are always instances of
Attribute, even if they were simple strings when provided.
"""
@attributes(["a", "b"])
class C(object):
pass
assert C.characteristic_attributes == [Attribute("a"), Attribute("b")]
def test_store_attributes_defaults_to_characteristic_attributes(self):
"""
By default, store_attributes stores the attributes in
`characteristic_attributes` on the class.
"""
attrs = [Attribute("a")]
@attributes(attrs)
class C(object):
pass
assert C.characteristic_attributes == attrs
def test_private(self):
"""
Integration test for name mangling/aliasing.
"""
@attributes([Attribute("_a")])
class C(object):
pass
c = C(a=42)
assert 42 == c._a
def test_private_no_alias(self):
"""
Integration test for name mangling/aliasing.
"""
@attributes([Attribute("_a", init_aliaser=None)])
class C(object):
pass
c = C(_a=42)
assert 42 == c._a
def test_deprecation_create_init(self):
"""
Emits a DeprecationWarning if `create_init` is used.
"""
with warnings.catch_warnings(record=True) as w:
@attributes(["a"], create_init=False)
class C(object):
pass
assert (
'`create_init` has been deprecated in 14.0, please use '
'`apply_with_init`.'
) == w[0].message.args[0]
assert issubclass(w[0].category, DeprecationWarning)
def test_deprecation_defaults(self):
"""
Emits a DeprecationWarning if `defaults` is used.
"""
with warnings.catch_warnings(record=True) as w:
@attributes(["a"], defaults={"a": 42})
class C(object):
pass
assert (
'`defaults` has been deprecated in 14.0, please use the '
'`Attribute` class instead.'
) == w[0].message.args[0]
assert issubclass(w[0].category, DeprecationWarning)
def test_does_not_allow_extra_keyword_arguments(self):
"""
Keyword arguments other than the ones consumed are still TypeErrors.
"""
with pytest.raises(TypeError) as e:
@attributes(["a"], not_an_arg=12)
class C(object):
pass
assert e.value.args == (
"attributes() got an unexpected keyword argument 'not_an_arg'",
)
def test_no_attributes(self):
"""
Specifying no attributes doesn't raise an exception.
"""
@attributes([])
class C(object):
pass
C()
class TestEnsureAttributes(object):
def test_leaves_attribute_alone(self):
"""
List items that are an Attribute stay an Attribute.
"""
a = Attribute("a")
assert a is _ensure_attributes([a], {})[0]
def test_converts_rest(self):
"""
Any other item will be transformed into an Attribute.
"""
l = _ensure_attributes(["a"], {})
assert isinstance(l[0], Attribute)
assert "a" == l[0].name
def test_defaults(self):
"""
Legacy defaults are translated into default_value attributes.
"""
l = _ensure_attributes(["a"], {"a": 42})
assert 42 == l[0].default_value
def test_defaults_Attribute(self):
"""
Raises ValueError on defaults != {} and an Attribute within attrs.
"""
with pytest.raises(ValueError):
_ensure_attributes([Attribute("a")], defaults={"a": 42})
class TestImmutable(object):
def test_bare(self):
"""
        In an immutable class, setting a definition-time attribute raises an
AttributeError.
"""
@immutable(["foo"])
class ImmuClass(object):
foo = "bar"
i = ImmuClass()
with pytest.raises(AttributeError):
i.foo = "not bar"
def test_Attribute(self):
"""
Mutation is caught if user passes an Attribute instance.
"""
@immutable([Attribute("foo")])
class ImmuClass(object):
def __init__(self):
self.foo = "bar"
i = ImmuClass()
with pytest.raises(AttributeError):
i.foo = "not bar"
def test_init(self):
"""
Changes within __init__ are allowed.
"""
@immutable(["foo"])
class ImmuClass(object):
def __init__(self):
self.foo = "bar"
i = ImmuClass()
assert "bar" == i.foo
def test_with_init(self):
"""
Changes in with_init's initializer are allowed.
"""
@immutable(["foo"])
@with_init(["foo"])
class ImmuClass(object):
pass
i = ImmuClass(foo="qux")
assert "qux" == i.foo
def test_Attribute_exclude_from_immutable(self):
"""
Ignores attribute if exclude_from_immutable=True.
"""
@immutable([Attribute("a", exclude_from_immutable=True), "b"])
class C(object):
def __init__(self, a, b):
self.a = a
self.b = b
c = C(1, 2)
c.a = 3
with pytest.raises(AttributeError):
c.b = 4
class TestAttrsToScript(object):
@pytest.mark.skipif(PY26, reason="Optimization works only on Python 2.7.")
def test_optimizes_simple(self):
"""
        If neither defaults nor extra checks are passed, an optimized version is
used on Python 2.7+.
"""
attrs = [Attribute("a")]
script = _attrs_to_script(attrs)
assert "except KeyError as e:" in script
def test_nothing():
"""
``NOTHING`` has a sensible repr.
"""
assert "NOTHING" == repr(NOTHING)
def test_doc():
"""
The characteristic module has a docstring.
"""
import characteristic
assert characteristic.__doc__
| {
"repo_name": "hynek/characteristic",
"path": "test_characteristic.py",
"copies": "1",
"size": "23985",
"license": "mit",
"hash": 5225856543588228000,
"line_mean": 26.3177676538,
"line_max": 79,
"alpha_frac": 0.5089430894,
"autogenerated": false,
"ratio": 4.0216297786720325,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5030572868072032,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import locale
import re
import os
import sys
import stat
from glob import glob
from os.path import (basename, dirname, join, splitext, isdir, isfile, exists,
islink, realpath, relpath)
try:
from os import readlink
except ImportError:
readlink = False
import io
from subprocess import call, Popen, PIPE
from collections import defaultdict
from conda_build.config import config
from conda_build import external
from conda_build import environ
from conda_build import utils
from conda_build import source
from conda.compat import lchmod
from conda.misc import walk_prefix
from conda.utils import md5_file
if sys.platform.startswith('linux'):
from conda_build import elf
elif sys.platform == 'darwin':
from conda_build import macho
SHEBANG_PAT = re.compile(r'^#!.+$', re.M)
def is_obj(path):
assert sys.platform != 'win32'
return bool((sys.platform.startswith('linux') and elf.is_elf(path)) or
(sys.platform == 'darwin' and macho.is_macho(path)))
def fix_shebang(f, osx_is_app=False):
path = join(config.build_prefix, f)
if is_obj(path):
return
elif os.path.islink(path):
return
with io.open(path, encoding=locale.getpreferredencoding()) as fi:
try:
data = fi.read()
except UnicodeDecodeError: # file is binary
return
m = SHEBANG_PAT.match(data)
if not (m and 'python' in m.group()):
return
py_exec = ('/bin/bash ' + config.build_prefix + '/bin/python.app'
if sys.platform == 'darwin' and osx_is_app else
config.build_prefix + '/bin/' + basename(config.build_python))
new_data = SHEBANG_PAT.sub('#!' + py_exec, data, count=1)
if new_data == data:
return
print("updating shebang:", f)
with io.open(path, 'w', encoding=locale.getpreferredencoding()) as fo:
fo.write(new_data)
    os.chmod(path, 0o755)
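# Illustrative sketch (added; not part of conda-build): shows what the
# SHEBANG_PAT substitution used by fix_shebang does to a script's first line.
# The prefix '/opt/prefix' is a made-up value standing in for
# config.build_prefix + '/bin/' + basename(config.build_python).
def _example_shebang_rewrite():
    data = "#!/usr/bin/env python\nprint('hi')\n"
    new_data = SHEBANG_PAT.sub('#!' + '/opt/prefix/bin/python', data, count=1)
    # only the interpreter line is rewritten; the rest of the file is untouched
    assert new_data.startswith('#!/opt/prefix/bin/python\n')
    assert new_data.endswith("print('hi')\n")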
def write_pth(egg_path):
fn = basename(egg_path)
with open(join(environ.get_sp_dir(),
'%s.pth' % (fn.split('-')[0])), 'w') as fo:
fo.write('./%s\n' % fn)
def remove_easy_install_pth(files, preserve_egg_dir=False):
"""
remove the need for easy-install.pth and finally remove easy-install.pth
itself
"""
absfiles = [join(config.build_prefix, f) for f in files]
sp_dir = environ.get_sp_dir()
for egg_path in glob(join(sp_dir, '*-py*.egg')):
if isdir(egg_path):
if preserve_egg_dir or not any(join(egg_path, i) in absfiles for i
in walk_prefix(egg_path, False, windows_forward_slashes=False)):
write_pth(egg_path)
continue
print('found egg dir:', egg_path)
try:
os.rename(join(egg_path, 'EGG-INFO'),
egg_path + '-info')
except OSError:
pass
utils.rm_rf(join(egg_path, 'EGG-INFO'))
for fn in os.listdir(egg_path):
if fn == '__pycache__':
utils.rm_rf(join(egg_path, fn))
else:
# this might be a name-space package
# so the package directory already exists
# from another installed dependency
if os.path.exists(join(sp_dir, fn)):
utils.copy_into(join(egg_path, fn), join(sp_dir, fn))
utils.rm_rf(join(egg_path, fn))
else:
os.rename(join(egg_path, fn), join(sp_dir, fn))
elif isfile(egg_path):
            if egg_path not in absfiles:
continue
print('found egg:', egg_path)
write_pth(egg_path)
utils.rm_rf(join(sp_dir, 'easy-install.pth'))
def rm_py_along_so():
"remove .py (.pyc) files alongside .so or .pyd files"
for root, dirs, files in os.walk(config.build_prefix):
for fn in files:
if fn.endswith(('.so', '.pyd')):
name, unused_ext = splitext(fn)
for ext in '.py', '.pyc':
if name + ext in files:
os.unlink(join(root, name + ext))
def compile_missing_pyc():
sp_dir = environ.get_sp_dir()
stdlib_dir = environ.get_stdlib_dir()
need_compile = False
for root, dirs, files in os.walk(sp_dir):
for fn in files:
if fn.endswith('.py') and fn + 'c' not in files:
need_compile = True
break
if need_compile:
print('compiling .pyc files...')
utils._check_call([config.build_python, '-Wi',
join(stdlib_dir, 'compileall.py'),
'-q', '-x', 'port_v3', sp_dir])
def post_process(files, preserve_egg_dir=False):
remove_easy_install_pth(files, preserve_egg_dir=preserve_egg_dir)
rm_py_along_so()
if config.CONDA_PY < 30:
compile_missing_pyc()
def find_lib(link, path=None):
from conda_build.build import prefix_files
files = prefix_files()
if link.startswith(config.build_prefix):
link = link[len(config.build_prefix) + 1:]
if link not in files:
sys.exit("Error: Could not find %s" % link)
return link
if link.startswith('/'): # but doesn't start with the build prefix
return
if link.startswith('@rpath/'):
# Assume the rpath already points to lib, so there is no need to
# change it.
return
if '/' not in link or link.startswith('@executable_path/'):
link = basename(link)
file_names = defaultdict(list)
for f in files:
file_names[basename(f)].append(f)
if link not in file_names:
sys.exit("Error: Could not find %s" % link)
if len(file_names[link]) > 1:
if path and basename(path) == link:
# The link is for the file itself, just use it
return path
# Allow for the possibility of the same library appearing in
# multiple places.
md5s = set()
for f in file_names[link]:
md5s.add(md5_file(join(config.build_prefix, f)))
if len(md5s) > 1:
sys.exit("Error: Found multiple instances of %s: %s" % (link, file_names[link]))
else:
file_names[link].sort()
print("Found multiple instances of %s (%s). "
"Choosing the first one." % (link, file_names[link]))
return file_names[link][0]
print("Don't know how to find %s, skipping" % link)
def osx_ch_link(path, link):
print("Fixing linking of %s in %s" % (link, path))
link_loc = find_lib(link, path)
if not link_loc:
return
lib_to_link = relpath(dirname(link_loc), 'lib')
# path_to_lib = utils.relative(path[len(config.build_prefix) + 1:])
# e.g., if
# path = '/build_prefix/lib/some/stuff/libstuff.dylib'
# link_loc = 'lib/things/libthings.dylib'
# then
# lib_to_link = 'things'
# path_to_lib = '../..'
# @rpath always means 'lib', link will be at
# @rpath/lib_to_link/basename(link), like @rpath/things/libthings.dylib.
# For when we can't use @rpath, @loader_path means the path to the library
# ('path'), so from path to link is
# @loader_path/path_to_lib/lib_to_link/basename(link), like
# @loader_path/../../things/libthings.dylib.
ret = '@rpath/%s/%s' % (lib_to_link, basename(link))
# XXX: IF the above fails for whatever reason, the below can be used
# TODO: This might contain redundant ..'s if link and path are both in
# some subdirectory of lib.
# ret = '@loader_path/%s/%s/%s' % (path_to_lib, lib_to_link, basename(link))
ret = ret.replace('/./', '/')
return ret
def mk_relative_osx(path, build_prefix=None):
'''
if build_prefix is None, then this is a standard conda build. The path
and all dependencies are in the build_prefix.
    If the package is built in develop mode, build_prefix is specified. The
    object specified by 'path' needs to relink its runtime dependencies to the
    libs found in build_prefix/lib/. Also, in develop mode, 'path' is not in
    'build_prefix'.
'''
if build_prefix is None:
assert path.startswith(config.build_prefix + '/')
else:
config.short_build_prefix = build_prefix
assert sys.platform == 'darwin' and is_obj(path)
s = macho.install_name_change(path, osx_ch_link)
names = macho.otool(path)
if names:
# Strictly speaking, not all object files have install names (e.g.,
# bundles and executables do not). In that case, the first name here
# will not be the install name (i.e., the id), but it isn't a problem,
# because in that case it will be a no-op (with the exception of stub
# files, which give an error, which is handled below).
args = [
'install_name_tool',
'-id',
join('@rpath', relpath(dirname(path),
join(config.build_prefix, 'lib')),
basename(names[0])),
path,
]
print(' '.join(args))
p = Popen(args, stderr=PIPE)
stdout, stderr = p.communicate()
stderr = stderr.decode('utf-8')
if "Mach-O dynamic shared library stub file" in stderr:
print("Skipping Mach-O dynamic shared library stub file %s" % path)
return
else:
print(stderr, file=sys.stderr)
if p.returncode:
raise RuntimeError("install_name_tool failed with exit status %d"
% p.returncode)
# Add an rpath to every executable to increase the chances of it
# being found.
args = [
'install_name_tool',
'-add_rpath',
join('@loader_path',
relpath(join(config.build_prefix, 'lib'),
dirname(path)), '').replace('/./', '/'),
path,
]
print(' '.join(args))
p = Popen(args, stderr=PIPE)
stdout, stderr = p.communicate()
stderr = stderr.decode('utf-8')
if "Mach-O dynamic shared library stub file" in stderr:
print("Skipping Mach-O dynamic shared library stub file %s\n" % path)
return
elif "would duplicate path, file already has LC_RPATH for:" in stderr:
print("Skipping -add_rpath, file already has LC_RPATH set")
return
else:
print(stderr, file=sys.stderr)
if p.returncode:
raise RuntimeError("install_name_tool failed with exit status %d"
% p.returncode)
if s:
# Skip for stub files, which have to use binary_has_prefix_files to be
# made relocatable.
assert_relative_osx(path)
def mk_relative_linux(f, rpaths=('lib',)):
path = join(config.build_prefix, f)
rpath = ':'.join('$ORIGIN/' + utils.relative(f, d) if not
d.startswith('/') else d for d in rpaths)
patchelf = external.find_executable('patchelf')
print('patchelf: file: %s\n setting rpath to: %s' % (path, rpath))
call([patchelf, '--force-rpath', '--set-rpath', rpath, path])
def assert_relative_osx(path):
for name in macho.otool(path):
assert not name.startswith(config.build_prefix), path
def mk_relative(m, f):
assert sys.platform != 'win32'
path = join(config.build_prefix, f)
if not is_obj(path):
return
if sys.platform.startswith('linux'):
mk_relative_linux(f, rpaths=m.get_value('build/rpaths', ['lib']))
elif sys.platform == 'darwin':
mk_relative_osx(path)
def fix_permissions(files):
print("Fixing permissions")
for root, dirs, unused_files in os.walk(config.build_prefix):
for dn in dirs:
lchmod(join(root, dn), int('755', 8))
for f in files:
path = join(config.build_prefix, f)
st = os.lstat(path)
lchmod(path, stat.S_IMODE(st.st_mode) | stat.S_IWUSR) # chmod u+w
def post_build(m, files):
print('number of files:', len(files))
fix_permissions(files)
if sys.platform == 'win32':
return
binary_relocation = bool(m.get_value('build/binary_relocation', True))
if not binary_relocation:
print("Skipping binary relocation logic")
osx_is_app = bool(m.get_value('build/osx_is_app', False))
for f in files:
if f.startswith('bin/'):
fix_shebang(f, osx_is_app=osx_is_app)
if binary_relocation:
mk_relative(m, f)
check_symlinks(files)
def check_symlinks(files):
if readlink is False:
return # Not on Unix system
msgs = []
real_build_prefix = realpath(config.build_prefix)
for f in files:
path = join(real_build_prefix, f)
if islink(path):
link_path = readlink(path)
real_link_path = realpath(path)
if real_link_path.startswith(real_build_prefix):
# If the path is in the build prefix, this is fine, but
# the link needs to be relative
if not link_path.startswith('.'):
# Don't change the link structure if it is already a
# relative link. It's possible that ..'s later in the path
# can result in a broken link still, but we'll assume that
# such crazy things don't happen.
print("Making absolute symlink %s -> %s relative" % (f, link_path))
os.unlink(path)
os.symlink(relpath(real_link_path, dirname(path)), path)
else:
# Symlinks to absolute paths on the system (like /usr) are fine.
if real_link_path.startswith(config.croot):
msgs.append("%s is a symlink to a path that may not "
"exist after the build is completed (%s)" % (f, link_path))
if msgs:
for msg in msgs:
print("Error: %s" % msg, file=sys.stderr)
sys.exit(1)
def get_build_metadata(m):
src_dir = source.get_dir()
if exists(join(src_dir, '__conda_version__.txt')):
with open(join(src_dir, '__conda_version__.txt')) as f:
version = f.read().strip()
print("Setting version from __conda_version__.txt: %s" % version)
m.meta['package']['version'] = version
if exists(join(src_dir, '__conda_buildnum__.txt')):
with open(join(src_dir, '__conda_buildnum__.txt')) as f:
build_number = f.read().strip()
print("Setting build number from __conda_buildnum__.txt: %s" %
build_number)
m.meta['build']['number'] = build_number
if exists(join(src_dir, '__conda_buildstr__.txt')):
with open(join(src_dir, '__conda_buildstr__.txt')) as f:
buildstr = f.read().strip()
print("Setting version from __conda_buildstr__.txt: %s" % buildstr)
m.meta['build']['string'] = buildstr
| {
"repo_name": "sandhujasmine/conda-build",
"path": "conda_build/post.py",
"copies": "1",
"size": "15124",
"license": "bsd-3-clause",
"hash": 84291779202404860,
"line_mean": 35.798053528,
"line_max": 96,
"alpha_frac": 0.5717402803,
"autogenerated": false,
"ratio": 3.6968956245416766,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47686359048416765,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import logging
from abc import ABCMeta, abstractmethod
from glue.utils import CallbackMixin
from glue.core.data_factories import load_data
MAX_UNDO = 50
"""
The classes in this module allow user actions to be stored as commands,
which can be undone/redone
All UI frontends should map interactions to command objects, instead
of directly performing an action.
Commands have access to two sources of data: the first are the
keyword arguments passed to the constructor. These are stored as
attributes of self. The second is a session object passed to all
Command.do and Command.undo calls.
"""
class Command(object):
"""
A class to encapsulate (and possibly undo) state changes
Subclasses of this abstract base class must implement the
`do` and `undo` methods.
Both `do` and `undo` receive a single input argument named
`session` -- this is whatever object is passed to the constructor
of :class:`glue.core.command.CommandStack`. This object is used
to store and retrieve resources needed by each command. The
Glue application itself uses a :class:`~glue.core.session.Session`
instance for this.
Each class should also override the class-level kwargs list,
to list the required keyword arguments that should be passed to the
command constructor. The base class will check that these
keywords are indeed provided. Commands should not take
non-keyword arguments in the constructor method
"""
__metaclass__ = ABCMeta
kwargs = []
def __init__(self, **kwargs):
kwargs = kwargs.copy()
for k in self.kwargs:
if k not in kwargs:
raise RuntimeError("Required keyword %s not passed to %s" %
(k, type(self)))
setattr(self, k, kwargs.pop(k))
self.extra = kwargs
@abstractmethod
def do(self, session):
"""
Execute the command
:param session: An object used to store and fetch resources
needed by a Command.
"""
pass
@abstractmethod
def undo(self, session):
pass
@property
def label(self):
return type(self).__name__
class CommandStack(CallbackMixin):
"""
The command stack collects commands,
and saves them to enable undoing/redoing
After instantiation, something can be assigned to
the session property. This is passed as the sole argument
of all Command (un)do methods.
"""
def __init__(self):
super(CommandStack, self).__init__()
self._session = None
self._command_stack = []
self._undo_stack = []
@property
def session(self):
return self._session
@session.setter
def session(self, value):
self._session = value
@property
def undo_label(self):
""" Brief label for the command reversed by an undo """
if len(self._command_stack) == 0:
return ''
cmd = self._command_stack[-1]
return cmd.label
@property
def redo_label(self):
""" Brief label for the command executed on a redo"""
if len(self._undo_stack) == 0:
return ''
cmd = self._undo_stack[-1]
return cmd.label
def do(self, cmd):
"""
Execute and log a new command
:rtype: The return value of cmd.do()
"""
logging.getLogger(__name__).debug("Do %s", cmd)
self._command_stack.append(cmd)
result = cmd.do(self._session)
self._command_stack = self._command_stack[-MAX_UNDO:]
self._undo_stack = []
self.notify('do')
return result
def undo(self):
"""
Undo the previous command
:raises: IndexError, if there are no objects to undo
"""
try:
c = self._command_stack.pop()
logging.getLogger(__name__).debug("Undo %s", c)
except IndexError:
raise IndexError("No commands to undo")
self._undo_stack.append(c)
c.undo(self._session)
self.notify('undo')
def redo(self):
"""
Redo the previously-undone command
:raises: IndexError, if there are no undone actions
"""
try:
c = self._undo_stack.pop()
            logging.getLogger(__name__).debug("Redo %s", c)
except IndexError:
raise IndexError("No commands to redo")
result = c.do(self._session)
self._command_stack.append(c)
self.notify('redo')
return result
def can_undo_redo(self):
"""
Return whether undo and redo options are possible
:rtype: (bool, bool) - Whether undo and redo are possible, respectively
"""
return len(self._command_stack) > 0, len(self._undo_stack) > 0
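# Illustrative sketch (added; not part of glue): a hypothetical SetAttribute
# command driven through a CommandStack, following the pattern described in
# the module docstring. Wrapped in a function so it does not run on import.
def _example_command_usage():
    class SetAttribute(Command):
        kwargs = ['obj', 'name', 'value']
        label = 'set attribute'
        def do(self, session):
            # remember the old value so the change can be undone
            self.old = getattr(self.obj, self.name)
            setattr(self.obj, self.name, self.value)
        def undo(self, session):
            setattr(self.obj, self.name, self.old)
    class Thing(object):
        color = 'red'
    thing = Thing()
    stack = CommandStack()
    stack.do(SetAttribute(obj=thing, name='color', value='blue'))
    assert thing.color == 'blue'
    stack.undo()
    assert thing.color == 'red'
    stack.redo()
    assert thing.color == 'blue'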
class LoadData(Command):
kwargs = ['path', 'factory']
label = 'load data'
def do(self, session):
return load_data(self.path, self.factory)
def undo(self, session):
pass
class AddData(Command):
kwargs = ['data']
label = 'add data'
def do(self, session):
session.data_collection.append(self.data)
def undo(self, session):
session.data_collection.remove(self.data)
class RemoveData(Command):
kwargs = ['data']
label = 'remove data'
def do(self, session):
session.data_collection.remove(self.data)
def undo(self, session):
session.data_collection.append(self.data)
class NewDataViewer(Command):
"""Add a new data viewer to the application
:param viewer: The class of viewer to create
:param data: The data object to initialize the viewer with, or None
    :type data: :class:`~glue.core.data.Data` or None
"""
kwargs = ['viewer', 'data']
label = 'new data viewer'
def do(self, session):
v = session.application.new_data_viewer(self.viewer, self.data)
self.created = v
return v
def undo(self, session):
self.created.close(warn=False)
class AddLayer(Command):
"""Add a new layer to a viewer
:param layer: The layer to add
:type layer: :class:`~glue.core.data.Data` or :class:`~glue.core.subset.Subset`
:param viewer: The viewer to add the layer to
"""
kwargs = ['layer', 'viewer']
label = 'add layer'
def do(self, session):
self.viewer.add_layer(self.layer)
def undo(self, session):
self.viewer.remove_layer(self.layer)
class ApplyROI(Command):
"""
Apply an ROI to a client, updating subset states
:param client: Client to work on
:type client: :class:`~glue.core.client.Client`
:param roi: Roi to apply
:type roi: :class:`~glue.core.roi.Roi`
"""
kwargs = ['client', 'roi']
label = 'apply ROI'
def do(self, session):
self.old_states = {}
for data in self.client.data:
for subset in data.subsets:
self.old_states[subset] = subset.subset_state
self.client.apply_roi(self.roi)
def undo(self, session):
for data in self.client.data:
for subset in data.subsets:
if subset not in self.old_states:
subset.delete()
for k, v in self.old_states.items():
k.subset_state = v
class LinkData(Command):
pass
class SetViewState(Command):
pass
class NewTab(Command):
pass
class CloseTab(Command):
pass
class NewSubset(Command):
pass
class CopySubset(Command):
pass
class PasteSubset(Command):
pass
class SpecialPasteSubset(Command):
pass
class DeleteSubset(Command):
pass
class SetStyle(Command):
pass
class SetLabel(Command):
pass
| {
"repo_name": "saimn/glue",
"path": "glue/core/command.py",
"copies": "2",
"size": "7773",
"license": "bsd-3-clause",
"hash": -8640250691673295000,
"line_mean": 23.7547770701,
"line_max": 83,
"alpha_frac": 0.6146918822,
"autogenerated": false,
"ratio": 4.059007832898172,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5673699715098173,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import logging
from abc import ABCMeta, abstractproperty, abstractmethod
import numpy as np
from matplotlib.cm import gray
from glue.external import six
from glue.core.exceptions import IncompatibleAttribute
from glue.core.layer_artist import MatplotlibLayerArtist, ChangedTrigger
from glue.core.util import small_view, small_view_array
from glue.utils import view_cascade, get_extent, color2rgb, Pointer
from .ds9norm import DS9Normalize
__all__ = ['RGBImageLayerArtist', 'ImageLayerArtist']
@six.add_metaclass(ABCMeta)
class RGBImageLayerBase(object):
r = abstractproperty() # ComponentID for red channel
g = abstractproperty() # ComponentID for green channel
b = abstractproperty() # ComponentID for blue channel
rnorm = abstractproperty() # Normalize instance for red channel
gnorm = abstractproperty() # Normalize instance for green channel
bnorm = abstractproperty() # Normalize instance for blue channel
contrast_layer = abstractproperty() # 'red' | 'green' | 'blue'. Which norm to adjust during set_norm
layer_visible = abstractproperty() # dict (str->bool). Whether to show 'red', 'green', 'blue' layers
@property
def color_visible(self):
"""
Return layer visibility as a list of [red_visible, green_visible, blue_visible]
"""
return [self.layer_visible['red'], self.layer_visible['green'],
self.layer_visible['blue']]
@color_visible.setter
def color_visible(self, value):
self.layer_visible['red'] = value[0]
self.layer_visible['green'] = value[1]
self.layer_visible['blue'] = value[2]
@six.add_metaclass(ABCMeta)
class ImageLayerBase(object):
norm = abstractproperty() # Normalization instance to scale intensities
cmap = abstractproperty() # colormap
@abstractmethod
def set_norm(self, **kwargs):
"""
Adjust the normalization instance parameters.
        See :class:`glue.viewers.image.ds9norm.DS9Normalize` attributes for valid
kwargs for this function
"""
pass
@abstractmethod
    def clear_norm(self):
"""
Reset the norm to the default
"""
pass
@abstractmethod
def override_image(self, image):
"""
Temporarily display another image instead of a view into the data
The new image has the same shape as the view into the data
"""
pass
@abstractmethod
def clear_override(self):
"""
Remove the override image, and display the data again
"""
pass
@six.add_metaclass(ABCMeta)
class SubsetImageLayerBase(object):
pass
class ImageLayerArtist(MatplotlibLayerArtist, ImageLayerBase):
_property_set = MatplotlibLayerArtist._property_set + ['norm']
def __init__(self, layer, ax):
super(ImageLayerArtist, self).__init__(layer, ax)
self._norm = None
self._cmap = gray
self._override_image = None
self._clip_cache = None
self.aspect = 'equal'
@property
def norm(self):
return self._norm
@norm.setter
def norm(self, value):
self._norm = value
@property
def cmap(self):
return self._cmap
@cmap.setter
def cmap(self, value):
self._cmap = value
for a in self.artists:
a.set_cmap(value)
def _default_norm(self, layer):
vals = np.sort(layer.ravel())
vals = vals[np.isfinite(vals)]
result = DS9Normalize()
result.stretch = 'arcsinh'
result.clip = True
if vals.size > 0:
result.vmin = vals[np.intp(.01 * vals.size)]
result.vmax = vals[np.intp(.99 * vals.size)]
return result
def override_image(self, image):
"""Temporarily show a different image"""
self._override_image = image
def clear_override(self):
self._override_image = None
def _extract_view(self, view, transpose):
if self._override_image is None:
result = self.layer[view]
if transpose:
result = result.T
return result
else:
v = [v for v in view if isinstance(v, slice)]
if transpose:
v = v[::-1]
result = self._override_image[v]
return result
def _update_clip(self, att):
key = (att, id(self._override_image),
self.norm.clip_lo, self.norm.clip_hi)
if self._clip_cache == key:
return
self._clip_cache = key
if self._override_image is None:
data = small_view(self.layer, att)
else:
data = small_view_array(self._override_image)
self.norm.update_clip(data)
def update(self, view, transpose=False, aspect=None):
if aspect is not None:
self.aspect = aspect
self.clear()
views = view_cascade(self.layer, view)
artists = []
lr0 = self._extract_view(views[0], transpose)
self.norm = self.norm or self._default_norm(lr0)
self._update_clip(views[0][0])
for v in views:
image = self._extract_view(v, transpose)
extent = get_extent(v, transpose)
artists.append(self._axes.imshow(image, cmap=self.cmap,
norm=self.norm,
interpolation='nearest',
origin='lower',
extent=extent, zorder=0))
self._axes.set_aspect(self.aspect, adjustable='datalim')
self.artists = artists
self._sync_style()
def set_norm(self, vmin=None, vmax=None,
bias=None, contrast=None, stretch=None, norm=None,
clip_lo=None, clip_hi=None):
if norm is not None:
self.norm = norm # XXX Should wrap ala DS9Normalize(norm)
return norm
if self.norm is None:
self.norm = DS9Normalize()
if vmin is not None:
self.norm.vmin = vmin
if vmax is not None:
self.norm.vmax = vmax
if bias is not None:
self.norm.bias = bias
if contrast is not None:
self.norm.contrast = contrast
if clip_lo is not None:
self.norm.clip_lo = clip_lo
if clip_hi is not None:
self.norm.clip_hi = clip_hi
if stretch is not None:
self.norm.stretch = stretch
return self.norm
def clear_norm(self):
self.norm = None
def _sync_style(self):
for artist in self.artists:
artist.set_zorder(self.zorder)
artist.set_visible(self.visible and self.enabled)
class RGBImageLayerArtist(ImageLayerArtist, RGBImageLayerBase):
_property_set = ImageLayerArtist._property_set + \
['r', 'g', 'b', 'rnorm', 'gnorm', 'bnorm', 'color_visible']
r = ChangedTrigger()
g = ChangedTrigger()
b = ChangedTrigger()
rnorm = Pointer('_rnorm')
gnorm = Pointer('_gnorm')
bnorm = Pointer('_bnorm')
# dummy class-level variables will be masked
# at instance level, needed for ABC to be happy
layer_visible = None
contrast_layer = None
def __init__(self, layer, ax, last_view=None):
super(RGBImageLayerArtist, self).__init__(layer, ax)
self.contrast_layer = 'green'
self.aspect = 'equal'
self.layer_visible = dict(red=True, green=True, blue=True)
self.last_view = last_view
def set_norm(self, *args, **kwargs):
spr = super(RGBImageLayerArtist, self).set_norm
if self.contrast_layer == 'red':
self.norm = self.rnorm
self.rnorm = spr(*args, **kwargs)
if self.contrast_layer == 'green':
self.norm = self.gnorm
self.gnorm = spr(*args, **kwargs)
if self.contrast_layer == 'blue':
self.norm = self.bnorm
self.bnorm = spr(*args, **kwargs)
def update(self, view=None, transpose=False, aspect=None):
self.clear()
if aspect is not None:
self.aspect = aspect
if self.r is None or self.g is None or self.b is None:
return
if view is None:
view = self.last_view
if view is None:
return
self.last_view = view
views = view_cascade(self.layer, view)
artists = []
for v in views:
extent = get_extent(v, transpose)
            # the first element of the view is the component; swap in each channel's ID
r = tuple([self.r] + list(v[1:]))
g = tuple([self.g] + list(v[1:]))
b = tuple([self.b] + list(v[1:]))
r = self.layer[r]
g = self.layer[g]
b = self.layer[b]
if transpose:
r = r.T
g = g.T
b = b.T
self.rnorm = self.rnorm or self._default_norm(r)
self.gnorm = self.gnorm or self._default_norm(g)
self.bnorm = self.bnorm or self._default_norm(b)
if v is views[0]:
self.rnorm.update_clip(small_view(self.layer, self.r))
self.gnorm.update_clip(small_view(self.layer, self.g))
self.bnorm.update_clip(small_view(self.layer, self.b))
image = np.dstack((self.rnorm(r),
self.gnorm(g),
self.bnorm(b)))
if not self.layer_visible['red']:
image[:, :, 0] *= 0
if not self.layer_visible['green']:
image[:, :, 1] *= 0
if not self.layer_visible['blue']:
image[:, :, 2] *= 0
artists.append(self._axes.imshow(image,
interpolation='nearest',
origin='lower',
extent=extent, zorder=0))
self._axes.set_aspect(self.aspect, adjustable='datalim')
self.artists = artists
self._sync_style()
class SubsetImageLayerArtist(MatplotlibLayerArtist, SubsetImageLayerBase):
def __init__(self, *args, **kwargs):
super(SubsetImageLayerArtist, self).__init__(*args, **kwargs)
self.aspect = 'equal'
def update(self, view, transpose=False, aspect=None):
self.clear()
if aspect is not None:
self.aspect = aspect
subset = self.layer
logging.debug("View into subset %s is %s", self.layer, view)
try:
mask = subset.to_mask(view[1:])
except IncompatibleAttribute as exc:
self.disable_invalid_attributes(*exc.args)
return False
logging.debug("View mask has shape %s", mask.shape)
# shortcut for empty subsets
if not mask.any():
return
if transpose:
mask = mask.T
extent = get_extent(view, transpose)
r, g, b = color2rgb(self.layer.style.color)
mask = np.dstack((r * mask, g * mask, b * mask, mask * .5))
mask = (255 * mask).astype(np.uint8)
self.artists = [self._axes.imshow(mask, extent=extent,
interpolation='nearest',
origin='lower',
zorder=5, visible=self.visible)]
self._axes.set_aspect(self.aspect, adjustable='datalim')
| {
"repo_name": "saimn/glue",
"path": "glue/viewers/image/layer_artist.py",
"copies": "2",
"size": "11664",
"license": "bsd-3-clause",
"hash": 1057277214242277200,
"line_mean": 31.7640449438,
"line_max": 106,
"alpha_frac": 0.5568415638,
"autogenerated": false,
"ratio": 4.038781163434903,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5595622727234904,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import logging
from contextlib import contextmanager
from weakref import WeakKeyDictionary
from inspect import getmro
from collections import defaultdict
from glue.core.exceptions import InvalidSubscriber, InvalidMessage
from glue.core.message import Message
from glue.core.hub_callback_container import HubCallbackContainer
__all__ = ['Hub', 'HubListener']
class Hub(object):
"""The hub manages communication between subscribers.
Objects :func:`subscribe` to receive specific message types. When
a message is passed to :func:`broadcast`, the hub observes the
following protocol:
* For each subscriber, it looks for a message class
subscription that is a superclass of the input message type
(if several are found, the most-subclassed one is chosen)
    * If one is found, it calls the subscription's filter(message)
      function (if provided)
* If filter(message) == True, it calls handler(message)
(or notify(message) if handler wasn't provided).
"""
def __init__(self, *args):
"""
Any arguments that are passed to Hub will be registered
to the new hub object.
"""
# Dictionary of subscriptions
self._subscriptions = WeakKeyDictionary()
self._paused = False
self._queue = []
from glue.core.data import Data
from glue.core.subset import Subset
from glue.core.data_collection import DataCollection
listeners = set(filter(lambda x: isinstance(x, HubListener), args))
data = set(filter(lambda x: isinstance(x, Data), args))
subsets = set(filter(lambda x: isinstance(x, Subset), args))
dcs = set(filter(lambda x: isinstance(x, DataCollection), args))
listeners -= (data | subsets | dcs)
if set(listeners | data | subsets | dcs) != set(args):
raise TypeError("Inputs must be HubListener, data, subset, or "
"data collection objects")
for l in listeners:
l.register_to_hub(self)
for d in data:
d.register_to_hub(self)
for dc in dcs:
dc.register_to_hub(self)
for s in subsets:
s.register()
def subscribe(self, subscriber, message_class,
handler=None,
filter=lambda x: True):
"""Subscribe an object to a type of message class.
:param subscriber: The subscribing object
:type subscriber: :class:`~glue.core.hub.HubListener`
:param message_class: A :class:`~glue.core.message.Message` class
to subscribe to
:param handler:
An optional function of the form handler(message) that will
receive the message on behalf of the subscriber. If not provided,
this defaults to the HubListener's notify method
:type handler: Callable
:param filter:
An optional function of the form filter(message). Messages
are only passed to the subscriber if filter(message) == True.
The default is to always pass messages.
:type filter: Callable
Raises:
InvalidMessage: If the input class isn't a
:class:`~glue.core.message.Message` class
InvalidSubscriber: If the input subscriber isn't a
HubListener object.
"""
if not isinstance(subscriber, HubListener):
raise InvalidSubscriber("Subscriber must be a HubListener: %s" %
type(subscriber))
if not isinstance(message_class, type) or \
not issubclass(message_class, Message):
raise InvalidMessage("message class must be a subclass of "
"glue.Message: %s" % type(message_class))
logging.getLogger(__name__).info("Subscribing %s to %s",
subscriber, message_class.__name__)
if not handler:
handler = subscriber.notify
if subscriber not in self._subscriptions:
self._subscriptions[subscriber] = HubCallbackContainer()
self._subscriptions[subscriber][message_class] = handler, filter
def is_subscribed(self, subscriber, message):
"""
        Test whether the subscriber has subscribed to a given message class
:param subscriber: The subscriber to test
:param message: The message class to test
Returns:
True if the subscriber/message pair have been subscribed to the hub
"""
return (subscriber in self._subscriptions and
message in self._subscriptions[subscriber])
def get_handler(self, subscriber, message):
if subscriber is None:
return None
try:
return self._subscriptions[subscriber][message][0]
except KeyError:
return None
def unsubscribe(self, subscriber, message):
"""
Remove a (subscriber,message) pair from subscription list.
The handler originally attached to the subscription will
no longer be called when broadcasting messages of type message
"""
if subscriber not in self._subscriptions:
return
if message in self._subscriptions[subscriber]:
self._subscriptions[subscriber].pop(message)
def unsubscribe_all(self, subscriber):
"""
Unsubscribe the object from any subscriptions.
"""
if subscriber in self._subscriptions:
self._subscriptions.pop(subscriber)
def _find_handlers(self, message):
"""Yields all (subscriber, handler) pairs that should receive a message
"""
# self._subscriptions:
        # subscriber => { message type => (handler, filter)}
# loop over subscribed objects
for subscriber, subscriptions in list(self._subscriptions.items()):
# subscriptions to message or its superclasses
messages = [msg for msg in subscriptions.keys() if
issubclass(type(message), msg)]
if len(messages) == 0:
continue
# narrow to the most-specific message
candidate = max(messages, key=_mro_count)
handler, test = subscriptions[candidate]
if test(message):
yield subscriber, handler
@contextmanager
def delay_callbacks(self):
self._paused = True
try:
yield
finally:
self._paused = False
# TODO: could de-duplicate messages here
for message in self._queue:
self.broadcast(message)
self._queue = []
def broadcast(self, message):
"""Broadcasts a message to all subscribed objects.
:param message: The message to broadcast
:type message: :class:`~glue.core.message.Message`
"""
if self._paused:
self._queue.append(message)
else:
logging.getLogger(__name__).info("Broadcasting %s", message)
for subscriber, handler in self._find_handlers(message):
handler(message)
def __getstate__(self):
""" Return a picklable representation of the hub
Note: Only objects in glue.core are currently supported
as pickleable. Thus, any subscriptions from objects outside
        glue.core will not be saved or restored
"""
result = self.__dict__.copy()
result['_subscriptions'] = self._subscriptions.copy()
for s in self._subscriptions:
try:
module = s.__module__
except AttributeError:
module = ''
if not module.startswith('glue.core'):
print('Pickle warning: Hub removing subscription to %s' % s)
result['_subscriptions'].pop(s)
return result
class HubListener(object):
"""
The base class for any object that subscribes to hub messages.
This interface defines a single method, notify, that receives
messages
"""
def register_to_hub(self, hub):
raise NotImplementedError
def unregister(self, hub):
""" Default unregistration action. Calls hub.unsubscribe_all on self"""
hub.unsubscribe_all(self)
def notify(self, message):
raise NotImplementedError("Message has no handler: %s" % message)
def _mro_count(obj):
return len(getmro(obj))
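# Illustrative sketch (added; not part of glue): a minimal HubListener wired
# up to a Hub, following the subscribe/broadcast protocol described in the
# Hub docstring. Wrapped in a function so it does not run on import.
def _example_hub_usage():
    class PrintListener(HubListener):
        def register_to_hub(self, hub):
            # subscribing to Message also matches all Message subclasses
            hub.subscribe(self, Message, handler=self.notify)
        def notify(self, message):
            print('received: %s' % message)
    hub = Hub()
    listener = PrintListener()
    listener.register_to_hub(hub)
    hub.broadcast(Message(listener, tag='hello'))  # calls listener.notify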
| {
"repo_name": "stscieisenhamer/glue",
"path": "glue/core/hub.py",
"copies": "2",
"size": "8577",
"license": "bsd-3-clause",
"hash": 1125380766443342500,
"line_mean": 33.4457831325,
"line_max": 79,
"alpha_frac": 0.6083712254,
"autogenerated": false,
"ratio": 4.870528109028961,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00014343086632243257,
"num_lines": 249
} |
from __future__ import absolute_import, division, print_function
import logging
from contextlib import contextmanager
import string
from itertools import count
import numpy as np
import pandas as pd
from ..external.six.moves import reduce
from ..external.six import string_types
__all__ = ["identity", "relim", "split_component_view", "join_component_view",
"facet_subsets", "colorize_subsets", "defer", "disambiguate",
"row_lookup", "PropertySetMixin", "CallbackMixin", "Pointer"]
def identity(x):
return x
def relim(lo, hi, log=False):
logging.getLogger(__name__).debug("Inputs to relim: %r %r", lo, hi)
x, y = lo, hi
if log:
if lo < 0:
x = 1e-5
if hi < 0:
y = 1e5
return x * .95, y * 1.05
delta = y - x
return (x - .02 * delta, y + .02 * delta)
def split_component_view(arg):
"""Split the input to data or subset.__getitem__ into its pieces.
:param arg: The input passed to data or subset.__getitem__.
Assumed to be either a scalar or tuple
:rtype: tuple
The first item is the Component selection (a ComponentID or
string)
The second item is a view (tuple of slices, slice scalar, or view
object)
"""
if isinstance(arg, tuple):
if len(arg) == 1:
raise TypeError("Expected a scalar or >length-1 tuple, "
"got length-1 tuple")
if len(arg) == 2:
return arg[0], arg[1]
return arg[0], arg[1:]
else:
return arg, None
def join_component_view(component, view):
"""Pack a componentID and optional view into single tuple
Returns an object compatible with data.__getitem__ and related
methods. Handles edge cases of when view is None, a scalar, a
tuple, etc.
:param component: ComponentID
:param view: view into data, or None
"""
if view is None:
return component
result = [component]
try:
result.extend(view)
except TypeError: # view is a scalar
result = [component, view]
return tuple(result)
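# Illustrative sketch (added; not part of glue): the two helpers above
# round-trip a component plus view through __getitem__-style keys.
def _example_component_view_roundtrip():
    key = join_component_view('mass', (slice(0, 2), 3))
    assert key == ('mass', slice(0, 2), 3)
    assert split_component_view(key) == ('mass', (slice(0, 2), 3))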
def facet_subsets(data_collection, cid, lo=None, hi=None, steps=5,
prefix='', log=False):
"""Create a series of subsets that partition the values of
a particular attribute into several bins
    This creates `steps` new subset groups, adds them to the data collection,
and returns the list of newly created subset groups.
    :param data_collection: DataCollection object to use
    :type data_collection: :class:`~glue.core.data_collection.DataCollection`
    :param cid: ComponentID to facet on
    :type cid: :class:`~glue.core.data.ComponentID`
:param lo: The lower bound for the faceting. Defaults to minimum value
in data
:type lo: float
:param hi: The upper bound for the faceting. Defaults to maximum
value in data
:type hi: float
:param steps: The number of subsets to create. Defaults to 5
:type steps: int
:param prefix: If present, the new subset labels will begin with `prefix`
:type prefix: str
:param log: If True, space divisions logarithmically. Default=False
:type log: bool
:returns: List of :class:`~glue.core.subset_group.SubsetGroup` instances
added to `data`
Example::
facet_subset(data, data.id['mass'], lo=0, hi=10, steps=2)
creates 2 new subsets. The first represents the constraint 0 <=
mass < 5. The second represents 5 <= mass < 10::
facet_subset(data, data.id['mass'], lo=10, hi=0, steps=2)
Creates 2 new subsets. The first represents the constraint 10 >= x > 5
The second represents 5 >= mass > 0::
facet_subset(data, data.id['mass'], lo=0, hi=10, steps=2, prefix='m')
Labels the subsets ``m_1`` and ``m_2``
"""
from .exceptions import IncompatibleAttribute
if lo is None or hi is None:
for data in data_collection:
try:
vals = data[cid]
break
except IncompatibleAttribute:
continue
else:
raise ValueError("Cannot infer data limits for ComponentID %s"
% cid)
if lo is None:
lo = np.nanmin(vals)
if hi is None:
hi = np.nanmax(vals)
reverse = lo > hi
if log:
rng = np.logspace(np.log10(lo), np.log10(hi), steps + 1)
else:
rng = np.linspace(lo, hi, steps + 1)
states = []
labels = []
for i in range(steps):
if reverse:
states.append((cid <= rng[i]) & (cid > rng[i + 1]))
labels.append(prefix + '{0}<{1}<={2}'.format(rng[i + 1], cid, rng[i]))
else:
states.append((cid >= rng[i]) & (cid < rng[i + 1]))
labels.append(prefix + '{0}<={1}<{2}'.format(rng[i], cid, rng[i + 1]))
result = []
for lbl, s in zip(labels, states):
sg = data_collection.new_subset_group(label=lbl, subset_state=s)
result.append(sg)
return result
def colorize_subsets(subsets, cmap, lo=0, hi=1):
"""Re-color a list of subsets according to a colormap
:param subsets: List of subsets
:param cmap: Matplotlib colormap instance
:param lo: Start location in colormap. 0-1. Defaults to 0
:param hi: End location in colormap. 0-1. Defaults to 1
The colormap will be sampled at `len(subsets)` even intervals
between `lo` and `hi`. The color at the `ith` interval will be
applied to `subsets[i]`
"""
from matplotlib import cm
sm = cm.ScalarMappable(cmap=cmap)
sm.norm.vmin = 0
sm.norm.vmax = 1
vals = np.linspace(lo, hi, len(subsets))
rgbas = sm.to_rgba(vals)
for color, subset in zip(rgbas, subsets):
r, g, b, a = color
r = int(255 * r)
g = int(255 * g)
b = int(255 * b)
subset.style.color = '#%2.2x%2.2x%2.2x' % (r, g, b)
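# Illustrative sketch (added; not part of glue): with two subsets and the full
# 0-1 range, the first gets the low end of the colormap and the second the
# high end. FakeSubset and FakeStyle are stand-ins for real glue subsets,
# defined here only so the sketch is self-contained.
def _example_colorize_subsets():
    from matplotlib import cm
    class FakeStyle(object):
        color = None
    class FakeSubset(object):
        def __init__(self):
            self.style = FakeStyle()
    subsets = [FakeSubset(), FakeSubset()]
    colorize_subsets(subsets, cm.gray, lo=0, hi=1)
    assert subsets[0].style.color == '#000000'  # black end of the gray map
    assert subsets[1].style.color == '#ffffff'  # white end of the gray map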
class PropertySetMixin(object):
"""An object that provides a set of properties that
are meant to encapsulate state information
This class exposes a properties attribute, which is a dict
of all properties. Similarly, assigning to the properties dict
will update the individual properties
"""
_property_set = []
@property
def properties(self):
""" A dict mapping property names to values """
return dict((p, getattr(self, p)) for p in self._property_set)
@properties.setter
def properties(self, value):
""" Update the properties with a new dict.
Keys in the new dict must be valid property names defined in
the _property_set class level attribute"""
invalid = set(value.keys()) - set(self._property_set)
if invalid:
raise ValueError("Invalid property values: %s" % invalid)
for k in self._property_set:
if k not in value:
continue
setattr(self, k, value[k])
class CallbackMixin(object):
"""
A mixin that provides a utility for attaching callback
functions to methods
"""
def __init__(self):
self._callbacks = []
def add_callback(self, function):
self._callbacks.append(function)
def remove_callback(self, function):
self._callbacks.remove(function)
def notify(self, *args, **kwargs):
for func in self._callbacks:
func(*args, **kwargs)
class Pointer(object):
def __init__(self, key):
self.key = key
def __get__(self, instance, type=None):
val = instance
for k in self.key.split('.'):
val = getattr(val, k, None)
return val
def __set__(self, instance, value):
v = self.key.split('.')
attr = reduce(getattr, [instance] + v[:-1])
setattr(attr, v[-1], value)
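# Illustrative sketch (added; not part of glue): Pointer is a descriptor that
# proxies a (possibly dotted) attribute path on the owning instance.
def _example_pointer():
    class Inner(object):
        value = 1
    class Outer(object):
        proxied = Pointer('inner.value')
        def __init__(self):
            self.inner = Inner()
    o = Outer()
    assert o.proxied == 1      # reads through to o.inner.value
    o.proxied = 2
    assert o.inner.value == 2  # writes through as well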
@contextmanager
def defer(instance, method):
"""
Defer the calling of a method inside a context manager,
and then call it 0 or 1 times afterwards.
:param instance: The instance of the method to defer
:param method: The name of the method to defer
:type method: str
Within the context block, calls to the method will be
intercepted, logged, and skipped.
Upon exiting the context block, the method will be
invoked a single time, with the arguments of the
    most recent invocation inside the context block.
If the method is never invoked in the context block,
it is not called when leaving that block.
"""
history = []
def log(*a, **k):
history.append((a, k))
orig = getattr(instance, method)
setattr(instance, method, log)
try:
yield
finally:
setattr(instance, method, orig)
for a, k in history[-1:]:
orig(*a, **k)
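# Illustrative sketch (added; not part of glue): inside the block, calls to
# the deferred method are recorded but skipped; on exit only the most recent
# call is replayed.
def _example_defer_usage():
    class Counter(object):
        def __init__(self):
            self.calls = []
        def update(self, value):
            self.calls.append(value)
    c = Counter()
    with defer(c, 'update'):
        c.update(1)
        c.update(2)
    assert c.calls == [2]  # only the last recorded call ran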
def disambiguate(label, taken):
"""If necessary, add a suffix to label to avoid name conflicts
:param label: desired label
:param taken: set of taken names
Returns label if it is not in the taken set. Otherwise, returns
label_NN where NN is the lowest integer such that label_NN not in taken.
"""
if label not in taken:
return label
suffix = "_%2.2i"
label = str(label)
for i in count(1):
candidate = label + (suffix % i)
if candidate not in taken:
return candidate
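# Illustrative sketch (added; not part of glue): disambiguate returns the
# label unchanged when it is free, otherwise appends the first unused _NN
# suffix.
def _example_disambiguate():
    assert disambiguate('data', set()) == 'data'
    assert disambiguate('data', {'data'}) == 'data_01'
    assert disambiguate('data', {'data', 'data_01'}) == 'data_02'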
def row_lookup(data, categories):
"""
Lookup which row in categories each data item is equal to
:param data: array-like
:param categories: array-like of unique values
:returns: Float array.
        If result[i] is finite, then data[i] = categories[result[i]]
Otherwise, data[i] is not in the categories list
"""
# np.searchsorted doesn't work on mixed types in Python3
ndata, ncat = len(data), len(categories)
data = pd.DataFrame({'data': data, 'row': np.arange(ndata)})
cats = pd.DataFrame({'categories': categories,
'cat_row': np.arange(ncat)})
m = pd.merge(data, cats, left_on='data', right_on='categories')
result = np.zeros(ndata, dtype=float) * np.nan
result[np.array(m.row)] = m.cat_row
return result
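# Illustrative sketch (added; not part of glue): each data value maps to its
# row index in `categories`; values absent from the categories come back as
# NaN.
def _example_row_lookup():
    result = row_lookup(['a', 'c', 'b', 'z'], ['a', 'b', 'c'])
    assert list(result[:3]) == [0.0, 2.0, 1.0]
    assert np.isnan(result[3])  # 'z' is not in the categories list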
| {
"repo_name": "JudoWill/glue",
"path": "glue/core/util.py",
"copies": "1",
"size": "10109",
"license": "bsd-3-clause",
"hash": 7090319416678223000,
"line_mean": 27.9656160458,
"line_max": 82,
"alpha_frac": 0.607280641,
"autogenerated": false,
"ratio": 3.8716966679433167,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49789773089433165,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import logging
from datetime import datetime
from errno import ENOTEMPTY
from io import BytesIO
from fsspec import AbstractFileSystem
logger = logging.Logger("fsspec.memoryfs")
class MemoryFileSystem(AbstractFileSystem):
"""A filesystem based on a dict of BytesIO objects
    This is a global filesystem, so all instances of this class point to the
    same in-memory filesystem.
"""
store = {} # global
pseudo_dirs = [""]
protocol = "memory"
root_marker = "/"
@classmethod
def _strip_protocol(cls, path):
if path.startswith("memory://"):
path = path[len("memory://") :]
if "::" in path or "://" in path:
return path.rstrip("/")
path = path.lstrip("/").rstrip("/")
return "/" + path if path else ""
def ls(self, path, detail=False, **kwargs):
path = self._strip_protocol(path)
if path in self.store:
# there is a key with this exact name
return [
{
"name": path,
"size": self.store[path].getbuffer().nbytes,
"type": "file",
"created": self.store[path].created,
}
]
paths = set()
starter = path + "/"
out = []
for p2 in self.store:
if p2.startswith(starter):
if "/" not in p2[len(starter) :]:
# exact child
out.append(
{
"name": p2,
"size": self.store[p2].getbuffer().nbytes,
"type": "file",
"created": self.store[p2].created,
}
)
elif len(p2) > len(starter):
# implied child directory
ppath = starter + p2[len(starter) :].split("/", 1)[0]
if ppath not in paths:
out = out or []
out.append(
{
"name": ppath,
"size": 0,
"type": "directory",
}
)
paths.add(ppath)
for p2 in self.pseudo_dirs:
if p2.startswith(starter):
if "/" not in p2[len(starter) :]:
# exact child pdir
if p2 not in paths:
out.append({"name": p2, "size": 0, "type": "directory"})
paths.add(p2)
else:
# directory implied by deeper pdir
ppath = starter + p2[len(starter) :].split("/", 1)[0]
if ppath not in paths:
out.append({"name": ppath, "size": 0, "type": "directory"})
paths.add(ppath)
if not out:
if path in self.pseudo_dirs:
# empty dir
return []
raise FileNotFoundError(path)
if detail:
return out
return sorted([f["name"] for f in out])
def mkdir(self, path, create_parents=True, **kwargs):
path = self._strip_protocol(path)
if path in self.store or path in self.pseudo_dirs:
raise FileExistsError
if self._parent(path).strip("/") and self.isfile(self._parent(path)):
raise NotADirectoryError(self._parent(path))
if create_parents and self._parent(path).strip("/"):
try:
self.mkdir(self._parent(path), create_parents, **kwargs)
except FileExistsError:
pass
if path and path not in self.pseudo_dirs:
self.pseudo_dirs.append(path)
def makedirs(self, path, exist_ok=False):
try:
self.mkdir(path, create_parents=True)
except FileExistsError:
if not exist_ok:
raise
def rmdir(self, path):
path = self._strip_protocol(path)
if path in self.pseudo_dirs:
if not self.ls(path):
self.pseudo_dirs.remove(path)
else:
raise OSError(ENOTEMPTY, "Directory not empty", path)
else:
raise FileNotFoundError(path)
def exists(self, path):
path = self._strip_protocol(path)
return path in self.store or path in self.pseudo_dirs
def info(self, path, **kwargs):
path = self._strip_protocol(path)
if path in self.pseudo_dirs or any(
p.startswith(path + "/") for p in list(self.store) + self.pseudo_dirs
):
return {
"name": path,
"size": 0,
"type": "directory",
}
elif path in self.store:
return {
"name": path,
"size": self.store[path].getbuffer().nbytes,
"type": "file",
"created": self.store[path].created,
}
else:
raise FileNotFoundError(path)
def _open(
self,
path,
mode="rb",
block_size=None,
autocommit=True,
cache_options=None,
**kwargs,
):
path = self._strip_protocol(path)
if path in self.pseudo_dirs:
raise IsADirectoryError
parent = path
while len(parent) > 1:
parent = self._parent(parent)
if self.isfile(parent):
raise FileExistsError(parent)
if mode in ["rb", "ab", "rb+"]:
if path in self.store:
f = self.store[path]
if mode == "ab":
# position at the end of file
f.seek(0, 2)
else:
# position at the beginning of file
f.seek(0)
return f
else:
raise FileNotFoundError(path)
if mode == "wb":
m = MemoryFile(self, path)
if not self._intrans:
m.commit()
return m
def cp_file(self, path1, path2, **kwargs):
path1 = self._strip_protocol(path1)
path2 = self._strip_protocol(path2)
if self.isfile(path1):
self.store[path2] = MemoryFile(self, path2, self.store[path1].getbuffer())
elif self.isdir(path1):
if path2 not in self.pseudo_dirs:
self.pseudo_dirs.append(path2)
else:
raise FileNotFoundError
def cat_file(self, path, start=None, end=None, **kwargs):
path = self._strip_protocol(path)
try:
return self.store[path].getvalue()[start:end]
except KeyError:
raise FileNotFoundError(path)
def _rm(self, path):
path = self._strip_protocol(path)
try:
del self.store[path]
except KeyError as e:
raise FileNotFoundError from e
def rm(self, path, recursive=False, maxdepth=None):
paths = self.expand_path(path, recursive=recursive, maxdepth=maxdepth)
for p in reversed(paths):
# If the expanded path doesn't exist, it is only because the expanded
# path was a directory that does not exist in self.pseudo_dirs. This
# is possible if you directly create files without making the
# directories first.
if not self.exists(p):
continue
if self.isfile(p):
self.rm_file(p)
else:
self.rmdir(p)
class MemoryFile(BytesIO):
"""A BytesIO which can't close and works as a context manager
Can initialise with data. Each path should only be active once at any moment.
No need to provide fs, path if auto-committing (default)
"""
def __init__(self, fs=None, path=None, data=None):
self.fs = fs
self.path = path
self.created = datetime.utcnow().timestamp()
if data:
self.write(data)
self.size = len(data)
self.seek(0)
def __enter__(self):
return self
def close(self):
position = self.tell()
self.size = self.seek(0, 2)
self.seek(position)
def discard(self):
pass
def commit(self):
self.fs.store[self.path] = self
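# Illustrative sketch (added; not part of fsspec): because the store is a
# class-level dict, two MemoryFileSystem instances see the same files. This
# assumes the standard AbstractFileSystem open()/ls() wrappers behave as
# usual; wrapped in a function so it does not run on import.
def _example_memory_filesystem():
    fs = MemoryFileSystem()
    with fs.open("memory://demo/hello.txt", "wb") as f:
        f.write(b"hi there")
    other = MemoryFileSystem()
    assert other.cat_file("/demo/hello.txt") == b"hi there"
    assert "/demo/hello.txt" in other.ls("/demo")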
| {
"repo_name": "intake/filesystem_spec",
"path": "fsspec/implementations/memory.py",
"copies": "1",
"size": "8484",
"license": "bsd-3-clause",
"hash": 8873974750892605000,
"line_mean": 32.2705882353,
"line_max": 86,
"alpha_frac": 0.4909240924,
"autogenerated": false,
"ratio": 4.404984423676012,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5395908516076012,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import logging
from functools import wraps
import numpy as np
from ..external.modest_image import extract_matched_slices
from ..core.exceptions import IncompatibleAttribute
from ..core.data import Data
from ..core.subset import Subset, RoiSubsetState
from ..core.roi import PolygonalROI
from ..core.message import ComponentReplacedMessage
from ..core.callback_property import (
callback_property, CallbackProperty)
from ..core.edit_subset_mode import EditSubsetMode
from ..utils import lookup_class, defer_draw
from .viz_client import VizClient, init_mpl
from .layer_artist import (ScatterLayerArtist, LayerArtistContainer,
ImageLayerArtist, SubsetImageLayerArtist,
RGBImageLayerArtist,
ImageLayerBase, RGBImageLayerBase,
SubsetImageLayerBase, ScatterLayerBase)
def requires_data(func):
"""Decorator that checks an ImageClient for a non-null display_data
attribute. Only executes decorated function if present"""
@wraps(func)
def result(*args, **kwargs):
if args[0].display_data is None:
return
return func(*args, **kwargs)
return result
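# Illustrative sketch (added; not part of glue): requires_data silently skips
# the wrapped method until display_data is set. FakeClient is a stand-in
# defined only for this sketch.
def _example_requires_data():
    class FakeClient(object):
        display_data = None
        @requires_data
        def describe(self):
            return 'has data'
    client = FakeClient()
    assert client.describe() is None  # skipped: display_data is unset
    client.display_data = object()
    assert client.describe() == 'has data'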
class ImageClient(VizClient):
display_data = CallbackProperty(None)
display_attribute = CallbackProperty(None)
def __init__(self, data, artist_container=None):
VizClient.__init__(self, data)
self.artists = artist_container
if self.artists is None:
self.artists = LayerArtistContainer()
# slice through ND cube
# ('y', 'x', 2)
# means current data slice is [:, :, 2], and axis=0 is vertical on plot
self._slice = None
# how to extract a downsampled/cropped 2D image to plot
# (ComponentID, slice, slice, ...)
self._view = None
# cropped/downsampled image
# self._image == self.display_data[self._view]
self._image = None
# if this is set, render this instead of self._image
self._override_image = None
# maps attributes -> normalization settings
self._norm_cache = {}
def point_details(self, x, y):
if self.display_data is None:
return dict(labels=['x=%s' % x, 'y=%s' % y],
pix=(x, y), world=(x, y), value=np.nan)
data = self.display_data
pix = self._pixel_coords(x, y)
labels = self.coordinate_labels(pix)
world = data.coords.pixel2world(*pix[::-1])
world = world[::-1] # reverse for numpy convention
view = []
for p, s in zip(pix, data.shape):
p = int(p)
if not (0 <= p < s):
value = None
break
view.append(slice(p, p + 1))
else:
if self._override_image is None:
value = self.display_data[self.display_attribute, view]
else:
value = self._override_image[int(y), int(x)]
value = value.ravel()[0]
return dict(pix=pix, world=world, labels=labels, value=value)
def coordinate_labels(self, pix):
""" Return human-readable labels for a position in pixel coords
:param pix: tuple of ints
            Pixel coordinate of the point in the data
:returns: List of strings, one for each coordinate axis, of the
            form "axis_label_name=world_coordinate_value"
:note: pix describes a position in the *data*, not necessarily
the image display
"""
data = self.display_data
if data is None:
return []
world = data.coords.pixel2world(*pix[::-1])
world = world[::-1] # reverse for numpy convention
labels = ['%s=%s' % (data.get_world_component_id(i).label, w)
for i, w in enumerate(world)]
return labels
@callback_property
def slice(self):
"""
Returns a tuple describing the current slice through the data
The tuple has length equal to the dimensionality of the display
data. Each entry is either:
'x' if the dimension is mapped to the X image axis
'y' if the dimension is mapped to the Y image axis
a number, indicating which fixed slice the dimension is restricted to
"""
if self._slice is not None:
return self._slice
if self.display_data is None:
return tuple()
ndim = self.display_data.ndim
if ndim == 1:
self._slice = ('x',)
elif ndim == 2:
self._slice = ('y', 'x')
else:
self._slice = (0,) * (ndim - 2) + ('y', 'x')
return self._slice
@slice.setter
@defer_draw
def slice(self, value):
if self.slice == tuple(value):
return
if value == tuple():
return
relim = value.index('x') != self._slice.index('x') or \
value.index('y') != self._slice.index('y')
self._slice = tuple(value)
self._clear_override()
self._update_axis_labels()
self._update_data_plot(relim=relim)
self._update_subset_plots()
self._update_scatter_plots()
self._redraw()
@property
def is_3D(self):
"""
Returns True if the display data has 3 dimensions """
if not self.display_data:
return False
return len(self.display_data.shape) == 3
@property
def slice_ind(self):
"""
For 3D data, returns the pixel index of the current slice.
Otherwise, returns None
"""
if self.is_3D:
for s in self.slice:
if s not in ['x', 'y']:
return s
return None
@property
def image(self):
return self._image
@requires_data
def override_image(self, image):
"""Temporarily override the current slice view with another
image (i.e., an aggregate)
"""
self._override_image = image
for a in self.artists[self.display_data]:
if isinstance(a, ImageLayerBase):
a.override_image(image)
self._update_data_plot()
self._redraw()
def _clear_override(self):
self._override_image = None
for a in self.artists[self.display_data]:
if isinstance(a, ImageLayerBase):
a.clear_override()
@slice_ind.setter
@defer_draw
def slice_ind(self, value):
if self.is_3D:
slc = [s if s in ['x', 'y'] else value for s in self.slice]
self.slice = slc
self._update_data_plot()
self._update_subset_plots()
self._update_scatter_plots()
self._redraw()
else:
raise IndexError("Can only set slice_ind for 3D images")
def can_image_data(self, data):
return data.ndim > 1
def _ensure_data_present(self, data):
if data not in self.artists:
self.add_layer(data)
@defer_draw
def set_data(self, data, attribute=None):
if not self.can_image_data(data):
return
self._ensure_data_present(data)
self._slice = None
attribute = attribute or _default_component(data)
self.display_data = data
self.display_attribute = attribute
self._update_axis_labels()
self._update_data_plot(relim=True)
self._update_subset_plots()
self._update_scatter_plots()
self._redraw()
def set_attribute(self, attribute):
if not self.display_data or \
attribute not in self.display_data.component_ids():
raise IncompatibleAttribute(
"Attribute not in data's attributes: %s" % attribute)
if self.display_attribute is not None:
self._norm_cache[self.display_attribute] = self.get_norm()
self.display_attribute = attribute
if attribute in self._norm_cache:
self.set_norm(norm=self._norm_cache[attribute])
else:
self.clear_norm()
self._update_data_plot()
self._redraw()
def _redraw(self):
"""
Re-render the screen
"""
pass
@requires_data
@defer_draw
def set_norm(self, **kwargs):
for a in self.artists[self.display_data]:
a.set_norm(**kwargs)
self._update_data_plot()
self._redraw()
@requires_data
def clear_norm(self):
for a in self.artists[self.display_data]:
a.clear_norm()
@requires_data
def get_norm(self):
a = self.artists[self.display_data][0]
return a.norm
@requires_data
@defer_draw
def set_cmap(self, cmap):
for a in self.artists[self.display_data]:
a.cmap = cmap
a.redraw()
def _build_view(self):
att = self.display_attribute
shp = self.display_data.shape
x, y = np.s_[:], np.s_[:]
slc = list(self.slice)
slc[slc.index('x')] = x
slc[slc.index('y')] = y
return (att,) + tuple(slc)
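    # Illustrative note (added): with a display slice of (2, 'y', 'x') the
    # view built above is (display_attribute, 2, slice(None), slice(None)),
    # i.e. the full plane at index 2. The Matplotlib subclass overrides
    # _build_view to substitute downsampled slices matched to the zoom level.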
@requires_data
def _numerical_data_changed(self, message):
data = message.sender
self._update_data_plot(force=True)
self._update_scatter_layer(data)
for s in data.subsets:
self._update_subset_single(s, force=True)
self._redraw()
@requires_data
def _update_data_plot(self, relim=False, force=False):
"""
Re-sync the main image and its subsets
"""
if relim:
self.relim()
view = self._build_view()
self._image = self.display_data[view]
transpose = self.slice.index('x') < self.slice.index('y')
self._view = view
for a in list(self.artists):
if (not isinstance(a, ScatterLayerBase)) and \
a.layer.data is not self.display_data:
self.artists.remove(a)
else:
a.update(view, transpose)
for a in self.artists[self.display_data]:
meth = a.update if not force else a.force_update
meth(view, transpose=transpose)
def _update_subset_single(self, s, redraw=False, force=False):
"""
Update the location and visual properties
of each point in a single subset
        Parameters
        ----------
        s : Subset
The subset to refresh.
"""
logging.getLogger(__name__).debug("update subset single: %s", s)
if s not in self.artists:
return
self._update_scatter_layer(s)
if s.data is not self.display_data:
return
view = self._build_view()
transpose = self.slice.index('x') < self.slice.index('y')
for a in self.artists[s]:
meth = a.update if not force else a.force_update
meth(view, transpose)
if redraw:
self._redraw()
@property
def _slice_ori(self):
if not self.is_3D:
return None
for i, s in enumerate(self.slice):
if s not in ['x', 'y']:
return i
@requires_data
@defer_draw
def apply_roi(self, roi):
subset_state = RoiSubsetState()
xroi, yroi = roi.to_polygon()
x, y = self._get_plot_attributes()
subset_state.xatt = x
subset_state.yatt = y
subset_state.roi = PolygonalROI(xroi, yroi)
mode = EditSubsetMode()
mode.update(self.data, subset_state, focus_data=self.display_data)
def _remove_subset(self, message):
self.delete_layer(message.sender)
def delete_layer(self, layer):
if layer not in self.artists:
return
for a in self.artists.pop(layer):
a.clear()
if isinstance(layer, Data):
for subset in layer.subsets:
self.delete_layer(subset)
if layer is self.display_data:
if len(self.artists) > 0:
self.display_data = self.artists.layers[0].data
else:
self.display_data = None
self._redraw()
def _remove_data(self, message):
self.delete_layer(message.data)
for s in message.data.subsets:
self.delete_layer(s)
def init_layer(self, layer):
# only auto-add subsets if they are of the main image
if isinstance(layer, Subset) and layer.data is not self.display_data:
return
self.add_layer(layer)
def rgb_mode(self, enable=None):
""" Query whether RGB mode is enabled, or toggle RGB mode
:param enable: bool, or None
If True or False, explicitly enable/disable RGB mode.
            If None, check whether RGB mode is enabled.
        :rtype: LayerArtist or None
            If RGB mode is enabled, returns an RGBImageLayerBase.
            If enable=False, returns the new ImageLayerArtist.
"""
# XXX need to better handle case where two RGBImageLayerArtists
# are created
if enable is None:
for a in self.artists:
if isinstance(a, RGBImageLayerBase):
return a
return None
result = None
layer = self.display_data
if enable:
layer = self.display_data
a = self._new_rgb_layer(layer)
if a is None:
return
a.r = a.g = a.b = self.display_attribute
with self.artists.ignore_empty():
self.artists.pop(layer)
self.artists.append(a)
result = a
else:
with self.artists.ignore_empty():
for artist in list(self.artists):
if isinstance(artist, RGBImageLayerBase):
self.artists.remove(artist)
result = self.add_layer(layer)
self._update_data_plot()
self._redraw()
return result
def add_layer(self, layer):
if layer in self.artists:
return self.artists[layer][0]
if layer.data not in self.data:
raise TypeError("Data not managed by client's data collection")
if not self.can_image_data(layer.data):
# if data is 1D, try to scatter plot
if len(layer.data.shape) == 1:
return self.add_scatter_layer(layer)
logging.getLogger(__name__).warning(
"Cannot visualize %s. Aborting", layer.label)
return
if isinstance(layer, Data):
result = self._new_image_layer(layer)
self.artists.append(result)
for s in layer.subsets:
self.add_layer(s)
self.set_data(layer)
elif isinstance(layer, Subset):
result = self._new_subset_image_layer(layer)
self.artists.append(result)
self._update_subset_single(layer)
else:
raise TypeError("Unrecognized layer type: %s" % type(layer))
return result
def add_scatter_layer(self, layer):
logging.getLogger(
__name__).debug('Adding scatter layer for %s' % layer)
if layer in self.artists:
logging.getLogger(__name__).debug('Layer already present')
return
result = self._new_scatter_layer(layer)
self.artists.append(result)
self._update_scatter_layer(layer)
return result
def _update_scatter_plots(self):
for layer in self.artists.layers:
self._update_scatter_layer(layer)
@requires_data
def _update_scatter_layer(self, layer, force=False):
if layer not in self.artists:
return
xatt, yatt = self._get_plot_attributes()
need_redraw = False
for a in self.artists[layer]:
if not isinstance(a, ScatterLayerBase):
continue
need_redraw = True
a.xatt = xatt
a.yatt = yatt
if self.is_3D:
zatt = self.display_data.get_pixel_component_id(
self._slice_ori)
subset = (
zatt > self.slice_ind) & (zatt <= self.slice_ind + 1)
a.emphasis = subset
else:
a.emphasis = None
a.update() if not force else a.force_update()
a.redraw()
if need_redraw:
self._redraw()
@requires_data
def _get_plot_attributes(self):
x, y = _slice_axis(self.display_data.shape, self.slice)
ids = self.display_data.pixel_component_ids
return ids[x], ids[y]
def _pixel_coords(self, x, y):
"""From a slice coordinate (x,y), return the full (possibly
>2D) numpy index into the full data
*Note*
The inputs to this function are the reverse of numpy convention
(horizontal axis first, then vertical)
*Returns*
Either (x,y) or (x,y,z)
"""
result = list(self.slice)
result[result.index('x')] = x
result[result.index('y')] = y
return result
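    # Illustrative note (added): with a display slice of (2, 'y', 'x'),
    # _pixel_coords(x=10, y=20) returns [2, 20, 10] -- the fixed plane index
    # followed by the y and x positions, in numpy axis order.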
def is_visible(self, layer):
return all(a.visible for a in self.artists[layer])
def set_visible(self, layer, state):
for a in self.artists[layer]:
a.visible = state
def set_slice_ori(self, ori):
if not self.is_3D:
raise IndexError("Can only set slice_ori for 3D images")
if ori == 0:
self.slice = (0, 'y', 'x')
elif ori == 1:
self.slice = ('y', 0, 'x')
elif ori == 2:
self.slice = ('y', 'x', 0)
else:
raise ValueError("Orientation must be 0, 1, or 2")
def restore_layers(self, layers, context):
""" Restore a list of glue-serialized layer dicts """
for layer in layers:
c = lookup_class(layer.pop('_type'))
props = dict((k, v if k == 'stretch' else context.object(v))
for k, v in layer.items())
l = props['layer']
if issubclass(c, ScatterLayerBase):
l = self.add_scatter_layer(l)
elif issubclass(c, RGBImageLayerBase):
r = props.pop('r')
g = props.pop('g')
b = props.pop('b')
self.display_data = l
self.display_attribute = r
l = self.rgb_mode(True)
l.r = r
l.g = g
l.b = b
elif issubclass(c, (ImageLayerBase, SubsetImageLayerBase)):
if isinstance(l, Data):
self.set_data(l)
l = self.add_layer(l)
else:
raise ValueError("Cannot restore layer of type %s" % l)
l.properties = props
def _on_component_replace(self, msg):
if self.display_attribute is msg.old:
self.display_attribute = msg.new
def register_to_hub(self, hub):
super(ImageClient, self).register_to_hub(hub)
hub.subscribe(self,
ComponentReplacedMessage,
self._on_component_replace)
# subclasses should override the following methods as appropriate
def _new_rgb_layer(self, layer):
"""
Construct and return an RGBImageLayerBase for the given layer
Parameters
----------
layer : Data or Subset instance
Which object to visualize
"""
raise NotImplementedError()
def _new_subset_image_layer(self, layer):
"""
Construct and return a SubsetImageLayerArtist for the given layer
Parameters
----------
layer : Data or Subset instance
Which object to visualize
"""
raise NotImplementedError()
def _new_image_layer(self, layer):
"""
Construct and return an ImageLayerArtist for the given layer
Parameters
----------
layer : Data or Subset instance
Which object to visualize
"""
raise NotImplementedError()
def _new_scatter_layer(self, layer):
"""
Construct and return a ScatterLayerArtist for the given layer
Parameters
----------
layer : Data or Subset instance
Which object to visualize
"""
raise NotImplementedError()
def _update_axis_labels(self):
"""
Sync the displays for labels on X/Y axes, because
the data or slice has changed
"""
raise NotImplementedError()
def relim(self):
"""
Reset view window to the default pan/zoom setting.
"""
pass
def show_crosshairs(self, x, y):
pass
def clear_crosshairs(self):
pass
class MplImageClient(ImageClient):
def __init__(self, data, figure=None, axes=None, artist_container=None):
super(MplImageClient, self).__init__(data, artist_container)
if axes is not None:
raise ValueError("ImageClient does not accept an axes")
self._setup_mpl(figure, axes)
# description of field of view and center of image
self._view_window = None
# artist for a crosshair
self._crosshairs = None
def _setup_mpl(self, figure, axes):
figure, axes = init_mpl(figure, axes, wcs=True)
self._axes = axes
self._axes.get_xaxis().set_ticks([])
self._axes.get_yaxis().set_ticks([])
self._figure = figure
# custom axes formatter
def format_coord(x, y):
data = self.display_data
if data is None:
# MPL default method
return type(self._axes).format_coord(self._axes, x, y)
info = self.point_details(x, y)
return ' '.join(info['labels'])
self._axes.format_coord = format_coord
self._cid = self._axes.figure.canvas.mpl_connect('button_release_event',
self.check_update)
if hasattr(self._axes.figure.canvas, 'homeButton'):
# test code doesn't always use Glue's custom FigureCanvas
self._axes.figure.canvas.homeButton.connect(self.check_update)
@property
def axes(self):
return self._axes
def check_update(self, *args):
"""
For the MPL client, see if the view window has changed enough
such that the images should be resampled
"""
logging.getLogger(__name__).debug("check update")
vw = _view_window(self._axes)
if vw != self._view_window:
logging.getLogger(__name__).debug("updating")
self._update_data_plot()
self._update_subset_plots()
self._redraw()
self._view_window = vw
@requires_data
def _update_axis_labels(self):
labels = _axis_labels(self.display_data, self.slice)
self._update_wcs_axes(self.display_data, self.slice)
self._axes.set_xlabel(labels[1])
self._axes.set_ylabel(labels[0])
@defer_draw
def _update_wcs_axes(self, data, slc):
wcs = getattr(data.coords, 'wcs', None)
if wcs is not None and hasattr(self.axes, 'reset_wcs'):
self.axes.reset_wcs(wcs, slices=slc[::-1])
def _redraw(self):
self._axes.figure.canvas.draw()
def relim(self):
shp = _2d_shape(self.display_data.shape, self.slice)
self._axes.set_xlim(0, shp[1])
self._axes.set_ylim(0, shp[0])
def _new_rgb_layer(self, layer):
v = self._view or self._build_view()
a = RGBImageLayerArtist(layer, self._axes, last_view=v)
return a
def _new_image_layer(self, layer):
return ImageLayerArtist(layer, self._axes)
def _new_subset_image_layer(self, layer):
return SubsetImageLayerArtist(layer, self._axes)
def _new_scatter_layer(self, layer):
return ScatterLayerArtist(layer, self._axes)
def _build_view(self):
att = self.display_attribute
shp = self.display_data.shape
shp_2d = _2d_shape(shp, self.slice)
v = extract_matched_slices(self._axes, shp_2d)
x = slice(v[0], v[1], v[2])
y = slice(v[3], v[4], v[5])
slc = list(self.slice)
slc[slc.index('x')] = x
slc[slc.index('y')] = y
return (att,) + tuple(slc)
def show_crosshairs(self, x, y):
if self._crosshairs is not None:
self._crosshairs.remove()
self._crosshairs, = self._axes.plot([x], [y], '+', ms=12,
mfc='none', mec='#d32d26',
mew=2, zorder=100)
self._redraw()
def clear_crosshairs(self):
if self._crosshairs is not None:
self._crosshairs.remove()
self._crosshairs = None
def _2d_shape(shape, slc):
"""Return the shape of the 2D slice through a 2 or 3D image
"""
# - numpy ordering here
return shape[slc.index('y')], shape[slc.index('x')]
def _slice_axis(shape, slc):
"""
Return a 2-tuple of which axes in a dataset lie along the
x and y axes of the image
:param shape: Shape of original data. tuple of ints
:param slc: Slice through the data, tuple of ints, 'x', and 'y'
"""
return slc.index('x'), slc.index('y')
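# Hedged sanity check, added for illustration (not part of the original
# module); it exercises only the two helpers defined above.
def _demo_slice_helpers():
    shape = (10, 20, 30)
    slc = (2, 'y', 'x')
    # numpy ordering: (ny, nx)
    assert _2d_shape(shape, slc) == (20, 30)
    # indices of the data axes mapped to the image x and y axes
    assert _slice_axis(shape, slc) == (2, 1)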
def _axis_labels(data, slc):
shape = data.shape
names = [data.get_world_component_id(i).label
for i in range(len(shape))]
return names[slc.index('y')], names[slc.index('x')]
def _view_window(ax):
""" Return a tuple describing the view window of an axes object.
    The contents should not be used directly. Rather, several
    return values should be compared with == to determine whether the
    window has been panned/zoomed.
"""
ext = (ax.transAxes.transform([(1, 1)]) - ax.transAxes.transform([(0, 0)]))[0]
xlim, ylim = ax.get_xlim(), ax.get_ylim()
result = xlim[0], ylim[0], xlim[1], ylim[1], ext[0], ext[1]
logging.getLogger(__name__).debug("view window: %s", result)
return result
def _default_component(data):
"""Choose a default ComponentID to display for data
Returns PRIMARY if present
"""
cid = data.find_component_id('PRIMARY')
if cid is not None:
return cid
return data.component_ids()[0]
| {
"repo_name": "JudoWill/glue",
"path": "glue/clients/image_client.py",
"copies": "1",
"size": "26469",
"license": "bsd-3-clause",
"hash": -8795166618981454000,
"line_mean": 29.8856476079,
"line_max": 82,
"alpha_frac": 0.558464619,
"autogenerated": false,
"ratio": 3.9850948509485096,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.504355946994851,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import logging
from functools import wraps
import numpy as np
from glue.external.modest_image import extract_matched_slices
from glue.core.edit_subset_mode import EditSubsetMode
from glue.core.callback_property import (
callback_property, CallbackProperty)
from glue.core.message import ComponentReplacedMessage, SettingsChangeMessage
from glue.core.roi import PolygonalROI
from glue.core.subset import Subset, RoiSubsetState
from glue.core.data import Data
from glue.core.exceptions import IncompatibleAttribute
from glue.core.layer_artist import LayerArtistContainer
from glue.core.state import lookup_class_with_patches
from glue.utils import defer_draw
from glue.viewers.common.viz_client import VizClient, init_mpl, update_appearance_from_settings
from glue.viewers.scatter.layer_artist import ScatterLayerBase, ScatterLayerArtist
from .layer_artist import (ImageLayerArtist, SubsetImageLayerArtist,
RGBImageLayerArtist, ImageLayerBase,
RGBImageLayerBase, SubsetImageLayerBase)
def requires_data(func):
"""
Decorator that checks an ImageClient for a non-null display_data
attribute. Only executes decorated function if present.
"""
@wraps(func)
def result(*args, **kwargs):
if args[0].display_data is None:
return
return func(*args, **kwargs)
return result
class ImageClient(VizClient):
display_data = CallbackProperty(None)
display_attribute = CallbackProperty(None)
display_aspect = CallbackProperty('equal')
def __init__(self, data, layer_artist_container=None):
VizClient.__init__(self, data)
self.artists = layer_artist_container
if self.artists is None:
self.artists = LayerArtistContainer()
# slice through ND cube
# ('y', 'x', 2)
# means current data slice is [:, :, 2], and axis=0 is vertical on plot
self._slice = None
# how to extract a downsampled/cropped 2D image to plot
# (ComponentID, slice, slice, ...)
self._view = None
# cropped/downsampled image
# self._image == self.display_data[self._view]
self._image = None
# if this is set, render this instead of self._image
self._override_image = None
# maps attributes -> normalization settings
self._norm_cache = {}
def point_details(self, x, y):
if self.display_data is None:
return dict(labels=['x=%s' % x, 'y=%s' % y],
pix=(x, y), world=(x, y), value=np.nan)
data = self.display_data
pix = self._pixel_coords(x, y)
labels = self.coordinate_labels(pix)
world = data.coords.pixel2world(*pix[::-1])
world = world[::-1] # reverse for numpy convention
view = []
for p, s in zip(pix, data.shape):
p = int(p)
if not (0 <= p < s):
value = None
break
view.append(slice(p, p + 1))
else:
if self._override_image is None:
value = self.display_data[self.display_attribute, view]
else:
value = self._override_image[int(y), int(x)]
value = value.ravel()[0]
return dict(pix=pix, world=world, labels=labels, value=value)
def coordinate_labels(self, pix):
"""
Return human-readable labels for a position in pixel coords
Parameters
----------
pix : tuple of int
Pixel coordinates of point in the data. Note that pix describes a
position in the *data*, not necessarily the image display.
Returns
-------
list
A list of strings for each coordinate axis, of the form
``axis_label_name=world_coordinate_value``
"""
data = self.display_data
if data is None:
return []
world = data.coords.pixel2world(*pix[::-1])
world = world[::-1] # reverse for numpy convention
labels = ['%s=%s' % (data.get_world_component_id(i).label, w)
for i, w in enumerate(world)]
return labels
@callback_property
def slice(self):
"""
Returns a tuple describing the current slice through the data
The tuple has length equal to the dimensionality of the display
data. Each entry is either:
* 'x' if the dimension is mapped to the X image axis
* 'y' if the dimension is mapped to the Y image axis
* a number, indicating which fixed slice the dimension is restricted to
"""
if self._slice is not None:
return self._slice
if self.display_data is None:
return tuple()
ndim = self.display_data.ndim
if ndim == 1:
self._slice = ('x',)
elif ndim == 2:
self._slice = ('y', 'x')
else:
self._slice = (0,) * (ndim - 2) + ('y', 'x')
return self._slice
@slice.setter
@defer_draw
def slice(self, value):
if self.slice == tuple(value):
return
if value == tuple():
return
relim = value.index('x') != self._slice.index('x') or \
value.index('y') != self._slice.index('y')
self._slice = tuple(value)
self._clear_override()
self._update_axis_labels()
self._update_data_plot(relim=relim)
self._update_subset_plots()
self._update_scatter_plots()
self._redraw()
@property
def is_3D(self):
"""
Returns True if the display data has 3 dimensions
"""
if not self.display_data:
return False
return len(self.display_data.shape) == 3
@property
def slice_ind(self):
"""
For 3D data, returns the pixel index of the current slice.
Otherwise, returns `None`.
"""
if self.is_3D:
for s in self.slice:
if s not in ['x', 'y']:
return s
return None
@property
def image(self):
return self._image
@requires_data
def override_image(self, image):
"""
Temporarily override the current slice view with another image (i.e.,
an aggregate).
"""
self._override_image = image
for a in self.artists[self.display_data]:
if isinstance(a, ImageLayerBase):
a.override_image(image)
self._update_data_plot()
self._redraw()
def _clear_override(self):
self._override_image = None
for a in self.artists[self.display_data]:
if isinstance(a, ImageLayerBase):
a.clear_override()
@slice_ind.setter
@defer_draw
def slice_ind(self, value):
if self.is_3D:
slc = [s if s in ['x', 'y'] else value for s in self.slice]
self.slice = slc
self._update_data_plot()
self._update_subset_plots()
self._update_scatter_plots()
self._redraw()
else:
raise IndexError("Can only set slice_ind for 3D images")
def can_image_data(self, data):
return data.ndim > 1
def _ensure_data_present(self, data):
if data not in self.artists:
self.add_layer(data)
@defer_draw
def set_data(self, data, attribute=None):
if not self.can_image_data(data):
return
self._ensure_data_present(data)
self._slice = None
attribute = attribute or _default_component(data)
self.display_data = data
self.display_attribute = attribute
self._update_axis_labels()
self._update_data_plot(relim=True)
self._update_subset_plots()
self._update_scatter_plots()
self._redraw()
def set_attribute(self, attribute):
if not self.display_data or \
attribute not in self.display_data.component_ids():
raise IncompatibleAttribute(
"Attribute not in data's attributes: %s" % attribute)
if self.display_attribute is not None:
self._norm_cache[self.display_attribute] = self.get_norm()
self.display_attribute = attribute
if attribute in self._norm_cache:
self.set_norm(norm=self._norm_cache[attribute])
else:
self.clear_norm()
self._update_data_plot()
self._redraw()
def _redraw(self):
"""
Re-render the screen.
"""
pass
@requires_data
@defer_draw
def set_norm(self, **kwargs):
for a in self.artists[self.display_data]:
a.set_norm(**kwargs)
self._update_data_plot()
self._redraw()
@requires_data
def clear_norm(self):
for a in self.artists[self.display_data]:
a.clear_norm()
@requires_data
def get_norm(self):
a = self.artists[self.display_data][0]
return a.norm
@requires_data
@defer_draw
def set_cmap(self, cmap):
for a in self.artists[self.display_data]:
a.cmap = cmap
a.redraw()
def _build_view(self):
att = self.display_attribute
shp = self.display_data.shape
x, y = np.s_[:], np.s_[:]
slc = list(self.slice)
slc[slc.index('x')] = x
slc[slc.index('y')] = y
return (att,) + tuple(slc)
@requires_data
def _numerical_data_changed(self, message):
data = message.sender
self._update_data_plot(force=True)
self._update_scatter_layer(data)
for s in data.subsets:
self._update_subset_single(s, force=True)
self._redraw()
@requires_data
def _update_data_plot(self, relim=False, force=False):
"""
Re-sync the main image and its subsets.
"""
if relim:
self.relim()
view = self._build_view()
self._image = self.display_data[view]
transpose = self.slice.index('x') < self.slice.index('y')
self._view = view
for a in list(self.artists):
if (not isinstance(a, ScatterLayerBase) and
a.layer.data is not self.display_data):
self.artists.remove(a)
else:
if isinstance(a, ImageLayerArtist):
a.update(view, transpose, aspect=self.display_aspect)
else:
a.update(view, transpose)
for a in self.artists[self.display_data]:
meth = a.update if not force else a.force_update
if isinstance(a, ImageLayerArtist):
meth(view, transpose=transpose, aspect=self.display_aspect)
else:
meth(view, transpose=transpose)
def _update_subset_single(self, s, redraw=False, force=False):
"""
Update the location and visual properties of each point in a single
subset.
Parameters
----------
s: `~glue.core.subset.Subset`
The subset to refresh.
"""
logging.getLogger(__name__).debug("update subset single: %s", s)
if s not in self.artists:
return
self._update_scatter_layer(s)
if s.data is not self.display_data:
return
view = self._build_view()
transpose = self.slice.index('x') < self.slice.index('y')
for a in self.artists[s]:
meth = a.update if not force else a.force_update
if isinstance(a, SubsetImageLayerArtist):
meth(view, transpose=transpose, aspect=self.display_aspect)
else:
meth(view, transpose=transpose)
if redraw:
self._redraw()
@property
def _slice_ori(self):
if not self.is_3D:
return None
for i, s in enumerate(self.slice):
if s not in ['x', 'y']:
return i
@requires_data
@defer_draw
def apply_roi(self, roi):
subset_state = RoiSubsetState()
xroi, yroi = roi.to_polygon()
x, y = self._get_plot_attributes()
subset_state.xatt = x
subset_state.yatt = y
subset_state.roi = PolygonalROI(xroi, yroi)
mode = EditSubsetMode()
mode.update(self.data, subset_state, focus_data=self.display_data)
def _remove_subset(self, message):
self.delete_layer(message.sender)
def delete_layer(self, layer):
if layer not in self.artists:
return
for a in self.artists.pop(layer):
a.clear()
if isinstance(layer, Data):
for subset in layer.subsets:
self.delete_layer(subset)
if layer is self.display_data:
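            # Note (added): the for/else below promotes the next remaining
            # image layer to display_data; the else branch runs only if no
            # ImageLayerArtist is left, in which case all remaining layers
            # are deleted and the display is cleared.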
for layer in self.artists:
if isinstance(layer, ImageLayerArtist):
self.display_data = layer.data
break
else:
for artist in self.artists:
self.delete_layer(artist.layer)
self.display_data = None
self.display_attribute = None
self._redraw()
def _remove_data(self, message):
self.delete_layer(message.data)
for s in message.data.subsets:
self.delete_layer(s)
def init_layer(self, layer):
# only auto-add subsets if they are of the main image
if isinstance(layer, Subset) and layer.data is not self.display_data:
return
self.add_layer(layer)
def rgb_mode(self, enable=None):
"""
Query whether RGB mode is enabled, or toggle RGB mode.
Parameters
----------
enable : bool or None
If `True` or `False`, explicitly enable/disable RGB mode.
If `None`, check if RGB mode is enabled
Returns
-------
LayerArtist or None
If RGB mode is enabled, returns an ``RGBImageLayerBase``.
If ``enable`` is `False`, return the new ``ImageLayerArtist``
"""
# XXX need to better handle case where two RGBImageLayerArtists
# are created
if enable is None:
for a in self.artists:
if isinstance(a, RGBImageLayerBase):
return a
return None
result = None
layer = self.display_data
if enable:
layer = self.display_data
a = self._new_rgb_layer(layer)
if a is None:
return
a.r = a.g = a.b = self.display_attribute
with self.artists.ignore_empty():
self.artists.pop(layer)
self.artists.append(a)
result = a
else:
with self.artists.ignore_empty():
for artist in list(self.artists):
if isinstance(artist, RGBImageLayerBase):
self.artists.remove(artist)
result = self.add_layer(layer)
self._update_data_plot()
self._redraw()
return result
def _update_aspect(self):
self._update_data_plot(relim=True)
self._redraw()
def add_layer(self, layer):
if layer in self.artists:
return self.artists[layer][0]
if layer.data not in self.data:
raise TypeError("Data not managed by client's data collection")
if not self.can_image_data(layer.data):
# if data is 1D, try to scatter plot
if len(layer.data.shape) == 1:
return self.add_scatter_layer(layer)
logging.getLogger(__name__).warning(
"Cannot visualize %s. Aborting", layer.label)
return
if isinstance(layer, Data):
result = self._new_image_layer(layer)
self.artists.append(result)
for s in layer.subsets:
self.add_layer(s)
self.set_data(layer)
elif isinstance(layer, Subset):
result = self._new_subset_image_layer(layer)
self.artists.append(result)
self._update_subset_single(layer)
else:
raise TypeError("Unrecognized layer type: %s" % type(layer))
return result
def add_scatter_layer(self, layer):
logging.getLogger(
__name__).debug('Adding scatter layer for %s' % layer)
if layer in self.artists:
logging.getLogger(__name__).debug('Layer already present')
return
result = self._new_scatter_layer(layer)
self.artists.append(result)
self._update_scatter_layer(layer)
return result
def _update_scatter_plots(self):
for layer in self.artists.layers:
self._update_scatter_layer(layer)
@requires_data
def _update_scatter_layer(self, layer, force=False):
if layer not in self.artists:
return
xatt, yatt = self._get_plot_attributes()
need_redraw = False
for a in self.artists[layer]:
if not isinstance(a, ScatterLayerBase):
continue
need_redraw = True
a.xatt = xatt
a.yatt = yatt
if self.is_3D:
zatt = self.display_data.get_pixel_component_id(
self._slice_ori)
subset = (
zatt > self.slice_ind) & (zatt <= self.slice_ind + 1)
a.emphasis = subset
else:
a.emphasis = None
a.update() if not force else a.force_update()
a.redraw()
if need_redraw:
self._redraw()
@requires_data
def _get_plot_attributes(self):
x, y = _slice_axis(self.display_data.shape, self.slice)
ids = self.display_data.pixel_component_ids
return ids[x], ids[y]
def _pixel_coords(self, x, y):
"""
From a slice coordinate (x,y), return the full (possibly >2D) numpy
index into the full data.
.. note:: The inputs to this function are the reverse of numpy
convention (horizontal axis first, then vertical)
Returns
-------
coords : tuple
Either a tuple of (x,y) or (x,y,z)
"""
result = list(self.slice)
result[result.index('x')] = x
result[result.index('y')] = y
return result
def is_visible(self, layer):
return all(a.visible for a in self.artists[layer])
def set_visible(self, layer, state):
for a in self.artists[layer]:
a.visible = state
def set_slice_ori(self, ori):
if not self.is_3D:
raise IndexError("Can only set slice_ori for 3D images")
if ori == 0:
self.slice = (0, 'y', 'x')
elif ori == 1:
self.slice = ('y', 0, 'x')
elif ori == 2:
self.slice = ('y', 'x', 0)
else:
raise ValueError("Orientation must be 0, 1, or 2")
def restore_layers(self, layers, context):
"""
Restore a list of glue-serialized layer dicts.
"""
for layer in layers:
c = lookup_class_with_patches(layer.pop('_type'))
props = dict((k, v if k == 'stretch' else context.object(v))
for k, v in layer.items())
l = props['layer']
if issubclass(c, ScatterLayerBase):
l = self.add_scatter_layer(l)
elif issubclass(c, RGBImageLayerBase):
r = props.pop('r')
g = props.pop('g')
b = props.pop('b')
self.display_data = l
self.display_attribute = r
l = self.rgb_mode(True)
l.r = r
l.g = g
l.b = b
elif issubclass(c, (ImageLayerBase, SubsetImageLayerBase)):
if isinstance(l, Data):
self.set_data(l)
l = self.add_layer(l)
else:
raise ValueError("Cannot restore layer of type %s" % l)
l.properties = props
def _on_component_replace(self, msg):
if self.display_attribute is msg.old:
self.display_attribute = msg.new
def register_to_hub(self, hub):
super(ImageClient, self).register_to_hub(hub)
hub.subscribe(self,
ComponentReplacedMessage,
self._on_component_replace)
# subclasses should override the following methods as appropriate
def _new_rgb_layer(self, layer):
"""
Construct and return an RGBImageLayerBase for the given layer
Parameters
----------
layer : :class:`~glue.core.data.Data` or :class:`~glue.core.subset.Subset`
Which object to visualize
"""
raise NotImplementedError()
def _new_subset_image_layer(self, layer):
"""
Construct and return a SubsetImageLayerArtist for the given layer
Parameters
----------
layer : :class:`~glue.core.data.Data` or :class:`~glue.core.subset.Subset`
Which object to visualize
"""
raise NotImplementedError()
def _new_image_layer(self, layer):
"""
Construct and return an ImageLayerArtist for the given layer
Parameters
----------
layer : :class:`~glue.core.data.Data` or :class:`~glue.core.subset.Subset`
Which object to visualize
"""
raise NotImplementedError()
def _new_scatter_layer(self, layer):
"""
Construct and return a ScatterLayerArtist for the given layer
Parameters
----------
layer : :class:`~glue.core.data.Data` or :class:`~glue.core.subset.Subset`
Which object to visualize
"""
raise NotImplementedError()
def _update_axis_labels(self):
"""
Sync the displays for labels on X/Y axes, because the data or slice has
changed
"""
raise NotImplementedError()
def relim(self):
"""
Reset view window to the default pan/zoom setting.
"""
pass
def show_crosshairs(self, x, y):
pass
def clear_crosshairs(self):
pass
class MplImageClient(ImageClient):
def __init__(self, data, figure=None, axes=None, layer_artist_container=None):
super(MplImageClient, self).__init__(data, layer_artist_container)
if axes is not None:
raise ValueError("ImageClient does not accept an axes")
self._setup_mpl(figure, axes)
# description of field of view and center of image
self._view_window = None
# artist for a crosshair
self._crosshairs = None
def _setup_mpl(self, figure, axes):
figure, axes = init_mpl(figure, axes, wcs=True)
self._axes = axes
self._axes.get_xaxis().set_ticks([])
self._axes.get_yaxis().set_ticks([])
self._figure = figure
# custom axes formatter
def format_coord(x, y):
data = self.display_data
if data is None:
# MPL default method
return type(self._axes).format_coord(self._axes, x, y)
info = self.point_details(x, y)
return ' '.join(info['labels'])
self._axes.format_coord = format_coord
self._cid = self._axes.figure.canvas.mpl_connect('button_release_event',
self.check_update)
if hasattr(self._axes.figure.canvas, 'homeButton'):
# test code doesn't always use Glue's custom FigureCanvas
self._axes.figure.canvas.homeButton.connect(self.check_update)
@property
def axes(self):
return self._axes
def check_update(self, *args):
"""
For the Matplotlib client, see if the view window has changed enough
such that the images should be resampled
"""
logging.getLogger(__name__).debug("check update")
# We need to make sure we reapply the aspect ratio manually here,
# because at this point, if the user has zoomed in to a region with a
# different aspect ratio than the original view, Matplotlib has not yet
        # enforced the aspect ratio when computing the final limits. This is
        # an issue if we have requested square pixels.
self.axes.apply_aspect()
vw = _view_window(self._axes)
if vw != self._view_window:
logging.getLogger(__name__).debug("updating")
self._update_and_redraw()
self._view_window = vw
def _update_and_redraw(self):
self._update_data_plot()
self._update_subset_plots()
self._redraw()
@requires_data
def _update_axis_labels(self):
labels = _axis_labels(self.display_data, self.slice)
self._update_wcs_axes(self.display_data, self.slice)
self._axes.set_xlabel(labels[1])
self._axes.set_ylabel(labels[0])
@defer_draw
def _update_wcs_axes(self, data, slc):
wcs = getattr(data.coords, 'wcs', None)
if wcs is not None and hasattr(self.axes, 'reset_wcs'):
self.axes.reset_wcs(wcs, slices=slc[::-1])
def _redraw(self):
self._axes.figure.canvas.draw()
def relim(self):
shp = _2d_shape(self.display_data.shape, self.slice)
self._axes.set_xlim(0, shp[1])
self._axes.set_ylim(0, shp[0])
def _new_rgb_layer(self, layer):
v = self._view or self._build_view()
a = RGBImageLayerArtist(layer, self._axes, last_view=v)
return a
def _new_image_layer(self, layer):
return ImageLayerArtist(layer, self._axes)
def _new_subset_image_layer(self, layer):
return SubsetImageLayerArtist(layer, self._axes)
def _new_scatter_layer(self, layer):
return ScatterLayerArtist(layer, self._axes)
def _build_view(self):
att = self.display_attribute
shp = self.display_data.shape
shp_2d = _2d_shape(shp, self.slice)
v = extract_matched_slices(self._axes, shp_2d)
x = slice(v[0], v[1], v[2])
y = slice(v[3], v[4], v[5])
slc = list(self.slice)
slc[slc.index('x')] = x
slc[slc.index('y')] = y
return (att,) + tuple(slc)
def show_crosshairs(self, x, y):
if self._crosshairs is not None:
self._crosshairs.remove()
self._crosshairs, = self._axes.plot([x], [y], '+', ms=12,
mfc='none', mec='#d32d26',
mew=2, zorder=100)
self._redraw()
def clear_crosshairs(self):
if self._crosshairs is not None:
self._crosshairs.remove()
self._crosshairs = None
def register_to_hub(self, hub):
super(MplImageClient, self).register_to_hub(hub)
def is_appearance_settings(msg):
return ('BACKGROUND_COLOR' in msg.settings
or 'FOREGROUND_COLOR' in msg.settings)
hub.subscribe(self, SettingsChangeMessage,
self._update_appearance_from_settings,
filter=is_appearance_settings)
def _update_appearance_from_settings(self, message):
update_appearance_from_settings(self.axes)
self._redraw()
def _2d_shape(shape, slc):
"""
Return the shape of the 2D slice through a 2 or 3D image.
"""
# - numpy ordering here
return shape[slc.index('y')], shape[slc.index('x')]
def _slice_axis(shape, slc):
"""
Return a 2-tuple of which axes in a dataset lie along the x and y axes of
the image.
Parameters
----------
shape : tuple
Shape of original data.
slc : tuple
        Slice through the data: a tuple of ints, 'x', and 'y'
"""
return slc.index('x'), slc.index('y')
def _axis_labels(data, slc):
shape = data.shape
names = [data.get_world_component_id(i).label
for i in range(len(shape))]
return names[slc.index('y')], names[slc.index('x')]
def _view_window(ax):
"""
Return a tuple describing the view window of an axes object.
    The contents should not be used directly. Rather, several
    return values should be compared with == to determine whether the
    window has been panned/zoomed.
"""
ext = (ax.transAxes.transform([(1, 1)]) - ax.transAxes.transform([(0, 0)]))[0]
xlim, ylim = ax.get_xlim(), ax.get_ylim()
result = xlim[0], ylim[0], xlim[1], ylim[1], ext[0], ext[1]
logging.getLogger(__name__).debug("view window: %s", result)
return result
def _default_component(data):
"""
Choose a default ComponentID to display for data
"""
cid = data.find_component_id('PRIMARY')
if cid is not None:
return cid
return data.component_ids()[0]
| {
"repo_name": "saimn/glue",
"path": "glue/viewers/image/client.py",
"copies": "1",
"size": "28957",
"license": "bsd-3-clause",
"hash": 6251882682900647000,
"line_mean": 30.1031149302,
"line_max": 95,
"alpha_frac": 0.5601063646,
"autogenerated": false,
"ratio": 3.9924169309251343,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0002679373969261392,
"num_lines": 931
} |
from __future__ import absolute_import, division, print_function
import logging
from functools import wraps
import numpy as np
# We avoid importing matplotlib up here otherwise Matplotlib and therefore Qt
# get imported as soon as glue.utils is imported.
from glue.external.axescache import AxesCache
from glue.utils.misc import DeferredMethod
__all__ = ['renderless_figure', 'all_artists', 'new_artists', 'remove_artists',
'get_extent', 'view_cascade', 'fast_limits', 'defer_draw',
'color2rgb', 'point_contour', 'cache_axes']
def renderless_figure():
# Matplotlib figure that skips the render step, for test speed
from mock import MagicMock
import matplotlib.pyplot as plt
fig = plt.figure()
fig.canvas.draw = MagicMock()
plt.close('all')
return fig
def all_artists(fig):
"""
Build a set of all Matplotlib artists in a Figure
"""
return set(item
for axes in fig.axes
for container in [axes.collections, axes.patches, axes.lines,
axes.texts, axes.artists, axes.images]
for item in container)
def new_artists(fig, old_artists):
"""
Find the newly-added artists in a figure
:param fig: Matplotlib figure
:param old_artists: Return value from :func:all_artists
:returns: All artists added since all_artists was called
"""
return all_artists(fig) - old_artists
def remove_artists(artists):
"""
Remove a collection of matplotlib artists from a scene
:param artists: Container of artists
"""
for a in artists:
try:
a.remove()
except ValueError: # already removed
pass
def get_extent(view, transpose=False):
sy, sx = [s for s in view if isinstance(s, slice)]
if transpose:
return (sy.start, sy.stop, sx.start, sx.stop)
return (sx.start, sx.stop, sy.start, sy.stop)
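# Hedged usage sketch (added; not part of the original module). A view tuple
# mixes a non-slice entry (e.g. a ComponentID) with the y and x slices in
# numpy order; get_extent returns a matplotlib-style (xmin, xmax, ymin, ymax).
def _demo_get_extent():
    view = ('attribute', slice(2, 5), slice(10, 40))
    assert get_extent(view) == (10, 40, 2, 5)
    assert get_extent(view, transpose=True) == (2, 5, 10, 40)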
def view_cascade(data, view):
"""
Return a set of views progressively zoomed out of input at roughly constant
pixel count
Parameters
----------
data : array-like
The array to view
view :
The original view into the data
"""
shp = data.shape
v2 = list(view)
logging.debug("image shape: %s, view: %s", shp, view)
# choose stride length that roughly samples entire image
# at roughly the same pixel count
step = max(shp[i - 1] * v.step // max(v.stop - v.start, 1)
for i, v in enumerate(view) if isinstance(v, slice))
step = max(step, 1)
for i, v in enumerate(v2):
        if not isinstance(v, slice):
continue
v2[i] = slice(0, shp[i - 1], step)
return tuple(v2), view
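# Hedged illustration (added; not part of the original module): a small
# 10x10 view of a 100x100 array cascades out to a strided view of the full
# array with roughly the same number of pixels.
def _demo_view_cascade():
    data = np.zeros((100, 100))
    view = ('att', slice(0, 10, 1), slice(0, 10, 1))
    zoomed_out, original = view_cascade(data, view)
    assert zoomed_out == ('att', slice(0, 100, 10), slice(0, 100, 10))
    assert original is view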
def _scoreatpercentile(values, percentile, limit=None):
# Avoid using the scipy version since it is available in Numpy
if limit is not None:
values = values[(values >= limit[0]) & (values <= limit[1])]
return np.percentile(values, percentile)
def fast_limits(data, plo, phi):
"""
Quickly estimate percentiles in an array, using a downsampled version
Parameters
----------
data : `numpy.ndarray`
The array to estimate the percentiles for
plo, phi : float
The percentile values
Returns
-------
lo, hi : float
The percentile values
"""
shp = data.shape
view = tuple([slice(None, None, np.intp(max(s / 50, 1))) for s in shp])
values = np.asarray(data)[view]
if ~np.isfinite(values).any():
return (0.0, 1.0)
limits = (-np.inf, np.inf)
lo = _scoreatpercentile(values.flat, plo, limit=limits)
hi = _scoreatpercentile(values.flat, phi, limit=limits)
return lo, hi
def defer_draw(func):
"""
Decorator that globally defers all Agg canvas draws until
function exit.
If a Canvas instance's draw method is invoked multiple times,
it will only be called once after the wrapped function returns.
"""
@wraps(func)
def wrapper(*args, **kwargs):
from matplotlib.backends.backend_agg import FigureCanvasAgg
# don't recursively defer draws
if isinstance(FigureCanvasAgg.draw, DeferredMethod):
return func(*args, **kwargs)
try:
FigureCanvasAgg.draw = DeferredMethod(FigureCanvasAgg.draw)
result = func(*args, **kwargs)
finally:
FigureCanvasAgg.draw.execute_deferred_calls()
FigureCanvasAgg.draw = FigureCanvasAgg.draw.original_method
return result
wrapper._is_deferred = True
return wrapper
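# Hedged sketch (added; not part of the original module): when defer_draw-
# wrapped functions nest, only the outermost call patches FigureCanvasAgg.draw
# and flushes the queued draws on exit; the inner call sees the patch already
# in place and falls through to a plain call. Assumes an Agg-based canvas.
@defer_draw
def _outer_refresh(figure):
    _inner_refresh(figure)
    figure.canvas.draw()  # queued until _outer_refresh returns
@defer_draw
def _inner_refresh(figure):
    figure.canvas.draw()  # also queued by the outer call's deferral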
def color2rgb(color):
from matplotlib.colors import ColorConverter
result = ColorConverter().to_rgb(color)
return result
def point_contour(x, y, data):
"""Calculate the contour that passes through (x,y) in data
:param x: x location
:param y: y location
:param data: 2D image
:type data: :class:`numpy.ndarray`
Returns:
       * A (nrow, 2) numpy array. The two columns give the x and
         y locations of the contour vertices
"""
try:
from scipy import ndimage
except ImportError:
raise ImportError("Image processing in Glue requires SciPy")
inten = data[y, x]
labeled, nr_objects = ndimage.label(data >= inten)
z = data * (labeled == labeled[y, x])
y, x = np.mgrid[0:data.shape[0], 0:data.shape[1]]
from matplotlib import _cntr
cnt = _cntr.Cntr(x, y, z)
xy = cnt.trace(inten)
if not xy:
return None
xy = xy[0]
return xy
class AxesResizer(object):
def __init__(self, ax, margins):
self.ax = ax
self.margins = margins
@property
def margins(self):
return self._margins
@margins.setter
def margins(self, margins):
self._margins = margins
def on_resize(self, event):
fig_width = self.ax.figure.get_figwidth()
fig_height = self.ax.figure.get_figheight()
x0 = self.margins[0] / fig_width
x1 = 1 - self.margins[1] / fig_width
y0 = self.margins[2] / fig_height
y1 = 1 - self.margins[3] / fig_height
dx = max(0.01, x1 - x0)
dy = max(0.01, y1 - y0)
self.ax.set_position([x0, y0, dx, dy])
self.ax.figure.canvas.draw()
def freeze_margins(axes, margins=[1, 1, 1, 1]):
"""
Make sure margins of axes stay fixed.
Parameters
----------
    axes : matplotlib.axes.Axes
        The axes for which to fix the margins
margins : iterable
The margins, in inches. The order of the margins is
``[left, right, bottom, top]``
Notes
-----
The object that controls the resizing is stored as the resizer attribute of
the Axes. This can be used to then change the margins:
    >>> ax.resizer.margins = [0.5, 0.5, 0.5, 0.5]
"""
axes.resizer = AxesResizer(axes, margins)
axes.figure.canvas.mpl_connect('resize_event', axes.resizer.on_resize)
def cache_axes(axes, toolbar):
"""
Set up caching for an axes object.
After this, cached renders will be used to quickly re-render an axes during
window resizing or interactive pan/zooming.
This function returns an AxesCache instance.
Parameters
----------
axes : `~matplotlib.axes.Axes`
The axes to cache
toolbar : `~glue.viewers.common.qt.toolbar.GlueToolbar`
The toolbar managing the axes' canvas
"""
canvas = axes.figure.canvas
cache = AxesCache(axes)
canvas.resize_begin.connect(cache.enable)
canvas.resize_end.connect(cache.disable)
toolbar.pan_begin.connect(cache.enable)
toolbar.pan_end.connect(cache.disable)
return cache
| {
"repo_name": "saimn/glue",
"path": "glue/utils/matplotlib.py",
"copies": "2",
"size": "7665",
"license": "bsd-3-clause",
"hash": -1435514187026761500,
"line_mean": 26.0848056537,
"line_max": 79,
"alpha_frac": 0.6275277234,
"autogenerated": false,
"ratio": 3.69041887337506,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5317946596775059,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import logging
from inspect import getmro
from collections import defaultdict
from glue.core.exceptions import InvalidSubscriber, InvalidMessage
from glue.core.message import Message
__all__ = ['Hub', 'HubListener']
class Hub(object):
"""The hub manages communication between subscribers.
Objects :func:`subscribe` to receive specific message types. When
a message is passed to :func:`broadcast`, the hub observes the
following protocol:
* For each subscriber, it looks for a message class
subscription that is a superclass of the input message type
(if several are found, the most-subclassed one is chosen)
    * If one is found, it calls the subscription's filter(message)
      function (if provided)
* If filter(message) == True, it calls handler(message)
(or notify(message) if handler wasn't provided).
"""
def __init__(self, *args):
"""
Any arguments that are passed to Hub will be registered
to the new hub object.
"""
# Dictionary of subscriptions
self._subscriptions = defaultdict(dict)
from glue.core.data import Data
from glue.core.subset import Subset
from glue.core.data_collection import DataCollection
listeners = set(filter(lambda x: isinstance(x, HubListener), args))
data = set(filter(lambda x: isinstance(x, Data), args))
subsets = set(filter(lambda x: isinstance(x, Subset), args))
dcs = set(filter(lambda x: isinstance(x, DataCollection), args))
listeners -= (data | subsets | dcs)
if set(listeners | data | subsets | dcs) != set(args):
raise TypeError("Inputs must be HubListener, data, subset, or "
"data collection objects")
for l in listeners:
l.register_to_hub(self)
for d in data:
d.register_to_hub(self)
for dc in dcs:
dc.register_to_hub(self)
for s in subsets:
s.register()
def subscribe(self, subscriber, message_class,
handler=None,
filter=lambda x: True):
"""Subscribe an object to a type of message class.
:param subscriber: The subscribing object
:type subscriber: :class:`~glue.core.hub.HubListener`
:param message_class: A :class:`~glue.core.message.Message` class
to subscribe to
:param handler:
An optional function of the form handler(message) that will
receive the message on behalf of the subscriber. If not provided,
this defaults to the HubListener's notify method
:type handler: Callable
:param filter:
An optional function of the form filter(message). Messages
are only passed to the subscriber if filter(message) == True.
The default is to always pass messages.
:type filter: Callable
Raises:
InvalidMessage: If the input class isn't a
:class:`~glue.core.message.Message` class
InvalidSubscriber: If the input subscriber isn't a
HubListener object.
"""
if not isinstance(subscriber, HubListener):
raise InvalidSubscriber("Subscriber must be a HubListener: %s" %
type(subscriber))
if not isinstance(message_class, type) or \
not issubclass(message_class, Message):
raise InvalidMessage("message class must be a subclass of "
"glue.Message: %s" % type(message_class))
logging.getLogger(__name__).info("Subscribing %s to %s",
subscriber, message_class.__name__)
if not handler:
handler = subscriber.notify
self._subscriptions[subscriber][message_class] = (filter, handler)
def is_subscribed(self, subscriber, message):
"""
        Test whether the subscriber has subscribed to a given message class
:param subscriber: The subscriber to test
:param message: The message class to test
Returns:
True if the subscriber/message pair have been subscribed to the hub
"""
return subscriber in self._subscriptions and \
message in self._subscriptions[subscriber]
def get_handler(self, subscriber, message):
try:
return self._subscriptions[subscriber][message][1]
except KeyError:
return None
def unsubscribe(self, subscriber, message):
"""
Remove a (subscriber,message) pair from subscription list.
The handler originally attached to the subscription will
no longer be called when broadcasting messages of type message
"""
if subscriber not in self._subscriptions:
return
if message in self._subscriptions[subscriber]:
self._subscriptions[subscriber].pop(message)
def unsubscribe_all(self, subscriber):
"""
Unsubscribe the object from any subscriptions.
"""
if subscriber in self._subscriptions:
self._subscriptions.pop(subscriber)
def _find_handlers(self, message):
"""Yields all (subscriber, handler) pairs that should receive a message
"""
# self._subscriptions:
# subscriber => { message type => (filter, handler)}
# loop over subscribed objects
for subscriber, subscriptions in list(self._subscriptions.items()):
# subscriptions to message or its superclasses
messages = [msg for msg in subscriptions.keys() if
issubclass(type(message), msg)]
if len(messages) == 0:
continue
# narrow to the most-specific message
candidate = max(messages, key=_mro_count)
test, handler = subscriptions[candidate]
if test(message):
yield subscriber, handler
def broadcast(self, message):
"""Broadcasts a message to all subscribed objects.
:param message: The message to broadcast
:type message: :class:`~glue.core.message.Message`
"""
logging.getLogger(__name__).info("Broadcasting %s", message)
for subscriber, handler in self._find_handlers(message):
handler(message)
def __getstate__(self):
""" Return a picklable representation of the hub
Note: Only objects in glue.core are currently supported
as pickleable. Thus, any subscriptions from objects outside
        glue.core will not be saved or restored
"""
result = self.__dict__.copy()
result['_subscriptions'] = self._subscriptions.copy()
for s in self._subscriptions:
try:
module = s.__module__
except AttributeError:
module = ''
if not module.startswith('glue.core'):
print('Pickle warning: Hub removing subscription to %s' % s)
result['_subscriptions'].pop(s)
return result
class HubListener(object):
"""
The base class for any object that subscribes to hub messages.
This interface defines a single method, notify, that receives
messages
"""
def register_to_hub(self, hub):
raise NotImplementedError
def unregister(self, hub):
""" Default unregistration action. Calls hub.unsubscribe_all on self"""
hub.unsubscribe_all(self)
def notify(self, message):
raise NotImplementedError("Message has no handler: %s" % message)
def _mro_count(obj):
return len(getmro(obj))
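# Hedged usage sketch (added; not part of the original module). It assumes the
# Message(sender, tag=None) signature from glue.core.message; everything else
# uses only the classes defined above.
def _demo_hub_roundtrip():
    received = []
    class EchoListener(HubListener):
        def register_to_hub(self, hub):
            hub.subscribe(self, Message, handler=received.append)
    hub = Hub()
    EchoListener().register_to_hub(hub)
    hub.broadcast(Message(None, tag='hello'))
    assert received[0].tag == 'hello'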
| {
"repo_name": "saimn/glue",
"path": "glue/core/hub.py",
"copies": "2",
"size": "7787",
"license": "bsd-3-clause",
"hash": -449945022321079550,
"line_mean": 33.9192825112,
"line_max": 79,
"alpha_frac": 0.6110183639,
"autogenerated": false,
"ratio": 4.833643699565488,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00016015374759769378,
"num_lines": 223
} |
from __future__ import absolute_import, division, print_function
import logging
from inspect import getmro
from collections import defaultdict
from .message import Message
from .exceptions import InvalidSubscriber, InvalidMessage
__all__ = ['Hub', 'HubListener']
class Hub(object):
"""The hub manages communication between subscribers.
Objects :func:`subscribe` to receive specific message types. When
a message is passed to :func:`broadcast`, the hub observes the
following protocol:
* For each subscriber, it looks for a message class
subscription that is a superclass of the input message type
(if several are found, the most-subclassed one is chosen)
    * If one is found, it calls the subscription's filter(message)
      function (if provided)
* If filter(message) == True, it calls handler(message)
(or notify(message) if handler wasn't provided).
"""
def __init__(self, *args):
"""
Any arguments that are passed to Hub will be registered
to the new hub object.
"""
# Dictionary of subscriptions
self._subscriptions = defaultdict(dict)
from .data import Data
from .subset import Subset
from .data_collection import DataCollection
listeners = set(filter(lambda x: isinstance(x, HubListener), args))
data = set(filter(lambda x: isinstance(x, Data), args))
subsets = set(filter(lambda x: isinstance(x, Subset), args))
dcs = set(filter(lambda x: isinstance(x, DataCollection), args))
listeners -= (data | subsets | dcs)
if set(listeners | data | subsets | dcs) != set(args):
raise TypeError("Inputs must be HubListener, data, subset, or "
"data collection objects")
for l in listeners:
l.register_to_hub(self)
for d in data:
d.register_to_hub(self)
for dc in dcs:
dc.register_to_hub(self)
for s in subsets:
s.register()
def subscribe(self, subscriber, message_class,
handler=None,
filter=lambda x: True):
"""Subscribe an object to a type of message class.
:param subscriber: The subscribing object
:type subscriber: :class:`~glue.core.hub.HubListener`
:param message_class: A :class:`~glue.core.message.Message` class
to subscribe to
:param handler:
An optional function of the form handler(message) that will
receive the message on behalf of the subscriber. If not provided,
this defaults to the HubListener's notify method
:type handler: Callable
:param filter:
An optional function of the form filter(message). Messages
are only passed to the subscriber if filter(message) == True.
The default is to always pass messages.
:type filter: Callable
Raises:
InvalidMessage: If the input class isn't a
:class:`~glue.core.message.Message` class
InvalidSubscriber: If the input subscriber isn't a
HubListener object.
"""
if not isinstance(subscriber, HubListener):
raise InvalidSubscriber("Subscriber must be a HubListener: %s" %
type(subscriber))
if not isinstance(message_class, type) or \
not issubclass(message_class, Message):
raise InvalidMessage("message class must be a subclass of "
"glue.Message: %s" % type(message_class))
logging.getLogger(__name__).info("Subscribing %s to %s",
subscriber, message_class.__name__)
if not handler:
handler = subscriber.notify
self._subscriptions[subscriber][message_class] = (filter, handler)
def is_subscribed(self, subscriber, message):
"""
        Test whether the subscriber has subscribed to a given message class
:param subscriber: The subscriber to test
:param message: The message class to test
Returns:
True if the subscriber/message pair have been subscribed to the hub
"""
return subscriber in self._subscriptions and \
message in self._subscriptions[subscriber]
def get_handler(self, subscriber, message):
try:
return self._subscriptions[subscriber][message][1]
except KeyError:
return None
def unsubscribe(self, subscriber, message):
"""
Remove a (subscriber,message) pair from subscription list.
The handler originally attached to the subscription will
no longer be called when broadcasting messages of type message
"""
if subscriber not in self._subscriptions:
return
if message in self._subscriptions[subscriber]:
self._subscriptions[subscriber].pop(message)
def unsubscribe_all(self, subscriber):
"""
Unsubscribe the object from any subscriptions.
"""
if subscriber in self._subscriptions:
self._subscriptions.pop(subscriber)
def _find_handlers(self, message):
"""Yields all (subscriber, handler) pairs that should receive a message
"""
# self._subscriptions:
# subscriber => { message type => (filter, handler)}
# loop over subscribed objects
for subscriber, subscriptions in list(self._subscriptions.items()):
# subscriptions to message or its superclasses
messages = [msg for msg in subscriptions.keys() if
issubclass(type(message), msg)]
if len(messages) == 0:
continue
# narrow to the most-specific message
candidate = max(messages, key=_mro_count)
test, handler = subscriptions[candidate]
if test(message):
yield subscriber, handler
def broadcast(self, message):
"""Broadcasts a message to all subscribed objects.
:param message: The message to broadcast
:type message: :class:`~glue.core.message.Message`
"""
logging.getLogger(__name__).info("Broadcasting %s", message)
for subscriber, handler in self._find_handlers(message):
handler(message)
def __getstate__(self):
""" Return a picklable representation of the hub
Note: Only objects in glue.core are currently supported
as pickleable. Thus, any subscriptions from objects outside
        glue.core will not be saved or restored
"""
result = self.__dict__.copy()
result['_subscriptions'] = self._subscriptions.copy()
for s in self._subscriptions:
try:
module = s.__module__
except AttributeError:
module = ''
if not module.startswith('glue.core'):
print('Pickle warning: Hub removing subscription to %s' % s)
result['_subscriptions'].pop(s)
return result
class HubListener(object):
"""
The base class for any object that subscribes to hub messages.
This interface defines a single method, notify, that receives
messages
"""
def register_to_hub(self, hub):
raise NotImplementedError
def unregister(self, hub):
""" Default unregistration action. Calls hub.unsubscribe_all on self"""
hub.unsubscribe_all(self)
def notify(self, message):
raise NotImplementedError("Message has no handler: %s" % message)
def _mro_count(obj):
return len(getmro(obj))
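# --- Illustrative usage sketch (not part of the original module) ---
# A minimal, hypothetical subscriber wired into the hub. The listener class
# below is an assumption for illustration; it only relies on Hub, HubListener
# and the glue.core.message.Message class defined elsewhere in glue.core.
#
#     class PrintListener(HubListener):
#         def notify(self, message):
#             print('received', message)
#
#     hub = Hub()
#     listener = PrintListener()
#     hub.subscribe(listener, Message)         # handler defaults to notify
#     hub.broadcast(Message(sender=None))      # PrintListener.notify fires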
| {
"repo_name": "JudoWill/glue",
"path": "glue/core/hub.py",
"copies": "1",
"size": "7741",
"license": "bsd-3-clause",
"hash": 4333110494248442000,
"line_mean": 33.8693693694,
"line_max": 79,
"alpha_frac": 0.6094819791,
"autogenerated": false,
"ratio": 4.8654934003771215,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00016087516087516087,
"num_lines": 222
} |
from __future__ import absolute_import, division, print_function
import logging
from itertools import count
from functools import partial
import numpy as np
import pandas as pd
from matplotlib.ticker import AutoLocator, MaxNLocator, LogLocator
from matplotlib.ticker import (LogFormatterMathtext, ScalarFormatter,
FuncFormatter)
__all__ = ["relim", "split_component_view", "join_component_view",
"facet_subsets", "colorize_subsets", "disambiguate",
"row_lookup", 'small_view', 'small_view_array', 'visible_limits',
'tick_linker', 'update_ticks']
def relim(lo, hi, log=False):
logging.getLogger(__name__).debug("Inputs to relim: %r %r", lo, hi)
x, y = lo, hi
if log:
if lo < 0:
x = 1e-5
if hi < 0:
y = 1e5
return x * .95, y * 1.05
delta = y - x
return (x - .02 * delta, y + .02 * delta)
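# Example (illustrative comment, not part of the original module): relim pads
# a finite range by 2% on each side so plot limits do not clip the data, and
# uses a multiplicative 5% margin in log mode.
#     relim(0, 10)             -> (-0.2, 10.2)
#     relim(1, 100, log=True)  -> (0.95, 105.0)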
def split_component_view(arg):
"""Split the input to data or subset.__getitem__ into its pieces.
:param arg: The input passed to data or subset.__getitem__.
Assumed to be either a scalar or tuple
:rtype: tuple
The first item is the Component selection (a ComponentID or
string)
The second item is a view (tuple of slices, slice, scalar, or view
object)
"""
if isinstance(arg, tuple):
if len(arg) == 1:
raise TypeError("Expected a scalar or >length-1 tuple, "
"got length-1 tuple")
if len(arg) == 2:
return arg[0], arg[1]
return arg[0], arg[1:]
else:
return arg, None
def join_component_view(component, view):
"""Pack a componentID and optional view into single tuple
Returns an object compatible with data.__getitem__ and related
methods. Handles edge cases of when view is None, a scalar, a
tuple, etc.
:param component: ComponentID
:param view: view into data, or None
"""
if view is None:
return component
result = [component]
try:
result.extend(view)
except TypeError: # view is a scalar
result = [component, view]
return tuple(result)
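# Example (illustrative comment, not part of the original module): the two
# helpers are inverses for the common __getitem__ cases.
#     split_component_view(('mass', slice(0, 5)))  -> ('mass', slice(0, 5))
#     split_component_view('mass')                 -> ('mass', None)
#     join_component_view('mass', slice(0, 5))     -> ('mass', slice(0, 5))
#     join_component_view('mass', None)            -> 'mass'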
def facet_subsets(data_collection, cid, lo=None, hi=None, steps=5,
prefix='', log=False):
"""Create a series of subsets that partition the values of
a particular attribute into several bins
This creates `steps` new subset groups, adds them to the data collection,
and returns the list of newly created subset groups.
:param data_collection: DataCollection object to use
:type data_collection: :class:`~glue.core.data_collection.DataCollection`
:param cid: ComponentID to facet on
:type cid: :class:`~glue.core.component_id.ComponentID`
:param lo: The lower bound for the faceting. Defaults to minimum value
in data
:type lo: float
:param hi: The upper bound for the faceting. Defaults to maximum
value in data
:type hi: float
:param steps: The number of subsets to create. Defaults to 5
:type steps: int
:param prefix: If present, the new subset labels will begin with `prefix`
:type prefix: str
:param log: If True, space divisions logarithmically. Default=False
:type log: bool
:returns: List of :class:`~glue.core.subset_group.SubsetGroup` instances
added to `data_collection`
Example::
facet_subsets(data, data.id['mass'], lo=0, hi=10, steps=2)
creates 2 new subsets. The first represents the constraint 0 <=
mass < 5. The second represents 5 <= mass <= 10::
facet_subsets(data, data.id['mass'], lo=10, hi=0, steps=2)
creates 2 new subsets. The first represents the constraint 10 >= mass > 5.
The second represents 5 >= mass >= 0::
facet_subsets(data, data.id['mass'], lo=0, hi=10, steps=2, prefix='m')
Labels the subsets ``m_1`` and ``m_2``.
Note that the last range is inclusive on both sides. For example, if ``lo``
is 0 and ``hi`` is 5, and ``steps`` is 5, then the intervals for the subsets
are [0,1), [1,2), [2,3), [3,4), and [4,5].
"""
from glue.core.exceptions import IncompatibleAttribute
if lo is None or hi is None:
for data in data_collection:
try:
vals = data[cid]
break
except IncompatibleAttribute:
continue
else:
raise ValueError("Cannot infer data limits for ComponentID %s"
% cid)
if lo is None:
lo = np.nanmin(vals)
if hi is None:
hi = np.nanmax(vals)
reverse = lo > hi
if log:
rng = np.logspace(np.log10(lo), np.log10(hi), steps + 1)
else:
rng = np.linspace(lo, hi, steps + 1)
states = []
labels = []
for i in range(steps):
# The if i < steps - 1 clauses are needed because the last interval
# has to be inclusive on both sides.
if reverse:
if i < steps - 1:
states.append((cid <= rng[i]) & (cid > rng[i + 1]))
labels.append(prefix + '{0}<{1}<={2}'.format(rng[i + 1], cid, rng[i]))
else:
states.append((cid <= rng[i]) & (cid >= rng[i + 1]))
labels.append(prefix + '{0}<={1}<={2}'.format(rng[i + 1], cid, rng[i]))
else:
if i < steps - 1:
states.append((cid >= rng[i]) & (cid < rng[i + 1]))
labels.append(prefix + '{0}<={1}<{2}'.format(rng[i], cid, rng[i + 1]))
else:
states.append((cid >= rng[i]) & (cid <= rng[i + 1]))
labels.append(prefix + '{0}<={1}<={2}'.format(rng[i], cid, rng[i + 1]))
result = []
for lbl, s in zip(labels, states):
sg = data_collection.new_subset_group(label=lbl, subset_state=s)
result.append(sg)
return result
def colorize_subsets(subsets, cmap, lo=0, hi=1):
"""Re-color a list of subsets according to a colormap
:param subsets: List of subsets
:param cmap: Matplotlib colormap instance
:param lo: Start location in colormap. 0-1. Defaults to 0
:param hi: End location in colormap. 0-1. Defaults to 1
The colormap will be sampled at `len(subsets)` even intervals
between `lo` and `hi`. The color at the `ith` interval will be
applied to `subsets[i]`
"""
from matplotlib import cm
sm = cm.ScalarMappable(cmap=cmap)
sm.norm.vmin = 0
sm.norm.vmax = 1
vals = np.linspace(lo, hi, len(subsets))
rgbas = sm.to_rgba(vals)
for color, subset in zip(rgbas, subsets):
r, g, b, a = color
r = int(255 * r)
g = int(255 * g)
b = int(255 * b)
subset.style.color = '#%2.2x%2.2x%2.2x' % (r, g, b)
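# --- Illustrative usage sketch (not part of the original module) ---
# A hypothetical pairing of facet_subsets with colorize_subsets; `dc` is
# assumed to be a DataCollection whose first dataset has a 'mass' component.
#
#     from matplotlib import cm
#     groups = facet_subsets(dc, dc[0].id['mass'], lo=0, hi=10, steps=5)
#     colorize_subsets(groups, cm.viridis, lo=0.2, hi=0.8)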
def disambiguate(label, taken):
"""If necessary, add a suffix to label to avoid name conflicts
:param label: desired label
:param taken: set of taken names
Returns label if it is not in the taken set. Otherwise, returns
label_NN where NN is the lowest integer such that label_NN not in taken.
"""
if label not in taken:
return label
suffix = "_%2.2i"
label = str(label)
for i in count(1):
candidate = label + (suffix % i)
if candidate not in taken:
return candidate
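# Example (illustrative comment, not part of the original module): the first
# free _NN suffix is appended when the label is already taken.
#     disambiguate('mass', set())               -> 'mass'
#     disambiguate('mass', {'mass', 'mass_01'}) -> 'mass_02'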
def row_lookup(data, categories):
"""
Lookup which row in categories each data item is equal to
:param data: array-like
:param categories: array-like of unique values
:returns: Float array.
If result[i] is finite, then data[i] = categories[result[i]]
Otherwise, data[i] is not in the categories list
"""
# np.searchsorted doesn't work on mixed types in Python3
ndata, ncat = len(data), len(categories)
data = pd.DataFrame({'data': data, 'row': np.arange(ndata)})
cats = pd.DataFrame({'categories': categories,
'cat_row': np.arange(ncat)})
m = pd.merge(data, cats, left_on='data', right_on='categories')
result = np.zeros(ndata, dtype=float) * np.nan
result[np.array(m.row)] = m.cat_row
return result
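# Example (illustrative comment, not part of the original module): values
# present in `categories` map to their row index; anything else maps to NaN.
#     row_lookup(['a', 'c', 'b', 'z'], ['a', 'b', 'c'])
#     -> array([ 0.,  2.,  1.,  nan])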
def small_view(data, attribute):
"""
Extract a downsampled view from a dataset, for quick
statistical summaries
"""
shp = data.shape
view = tuple([slice(None, None, np.intp(max(s / 50, 1))) for s in shp])
return data[attribute, view]
def small_view_array(data):
"""
Same as small_view, except using a numpy array as input
"""
shp = data.shape
view = tuple([slice(None, None, np.intp(max(s / 50, 1))) for s in shp])
return np.asarray(data)[view]
def visible_limits(artists, axis):
"""
Determines the data limits for the data in a set of artists.
Ignores non-visible artists
Assumes each artist has a get_data method which returns a tuple of x, y
Returns a tuple of min, max for the requested axis, or None if no data
present
:param artists: An iterable collection of artists
:param axis: Which axis to compute. 0=xaxis, 1=yaxis
"""
data = []
for art in artists:
if not art.visible:
continue
xy = art.get_data()
assert isinstance(xy, tuple)
val = xy[axis]
if val.size > 0:
data.append(xy[axis])
if len(data) == 0:
return
data = np.hstack(data)
if data.size == 0:
return
data = data[np.isfinite(data)]
if data.size == 0:
return
lo, hi = np.nanmin(data), np.nanmax(data)
if not np.isfinite(lo):
return
return lo, hi
def tick_linker(all_categories, pos, *args):
# We need to take care to ignore negative indices since these would actually
# 'work' when accessing all_categories, but we need to avoid that.
if pos < 0 or pos >= len(all_categories):
return ''
else:
try:
pos = np.round(pos)
return all_categories[int(pos)]
except IndexError:
return ''
def update_ticks(axes, coord, components, is_log):
"""
Changes the axes to have the proper tick formatting based on the type of
component.
:param axes: A matplotlib axis object to alter
:param coord: 'x' or 'y'
:param components: A list() of components that are plotted along this axis
:param is_log: Boolean for log-scale.
:kwarg max_categories: The maximum number of categories to display.
:return: None or #categories if components is Categorical
"""
if coord == 'x':
axis = axes.xaxis
elif coord == 'y':
axis = axes.yaxis
else:
raise TypeError("coord must be one of x,y")
is_cat = all(comp.categorical for comp in components)
if is_log:
axis.set_major_locator(LogLocator())
axis.set_major_formatter(LogFormatterMathtext())
elif is_cat:
all_categories = np.empty((0,), dtype=np.object)
for comp in components:
all_categories = np.union1d(comp.categories, all_categories)
locator = MaxNLocator(10, integer=True)
locator.view_limits(0, all_categories.shape[0])
format_func = partial(tick_linker, all_categories)
formatter = FuncFormatter(format_func)
axis.set_major_locator(locator)
axis.set_major_formatter(formatter)
return all_categories.shape[0]
else:
axis.set_major_locator(AutoLocator())
axis.set_major_formatter(ScalarFormatter())
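# --- Illustrative usage sketch (not part of the original module) ---
# Hypothetical call pattern; `ax` is assumed to be a matplotlib Axes and
# `components` a list of glue components plotted along the x axis.
#
#     n_cat = update_ticks(ax, 'x', components, is_log=False)
#     # n_cat is the number of categories if every component is categorical,
#     # otherwise None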
| {
"repo_name": "stscieisenhamer/glue",
"path": "glue/core/util.py",
"copies": "1",
"size": "11445",
"license": "bsd-3-clause",
"hash": -3899604487544298000,
"line_mean": 29.6016042781,
"line_max": 87,
"alpha_frac": 0.5994757536,
"autogenerated": false,
"ratio": 3.729227761485826,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9827762243207496,
"avg_score": 0.00018825437566601745,
"num_lines": 374
} |
from __future__ import absolute_import, division, print_function
import logging
from time import time
import numpy as np
from ginga.misc import Bunch
from ginga.util import wcsmod
from ginga import AstroImage, BaseImage
from glue.core.util import split_component_view
from glue.core.exceptions import IncompatibleAttribute
from glue.core.layer_artist import LayerArtistBase
from glue.utils import view_shape, stack_view, color2rgb, Pointer
from glue.viewers.image.client import ImageClient
from glue.viewers.image.layer_artist import ImageLayerBase, SubsetImageLayerBase
wcsmod.use('astropy')
class GingaClient(ImageClient):
def __init__(self, data, canvas=None, layer_artist_container=None):
super(GingaClient, self).__init__(data, layer_artist_container)
self._setup_ginga(canvas)
def _setup_ginga(self, canvas):
if canvas is None:
raise ValueError("GingaClient needs a canvas")
self._canvas = canvas
self._wcs = None
self._crosshair_id = '_crosshair'
def _new_rgb_layer(self, layer):
raise NotImplementedError()
def _new_subset_image_layer(self, layer):
return GingaSubsetImageLayer(layer, self._canvas)
def _new_image_layer(self, layer):
return GingaImageLayer(layer, self._canvas)
def _new_scatter_layer(self, layer):
raise NotImplementedError()
def _update_axis_labels(self):
pass
def _update_and_redraw(self):
pass
def set_cmap(self, cmap):
self._canvas.set_cmap(cmap)
def show_crosshairs(self, x, y):
self.clear_crosshairs()
c = self._canvas.viewer.getDrawClass('point')(x, y, 6, color='red',
style='plus')
self._canvas.add(c, tag=self._crosshair_id, redraw=True)
def clear_crosshairs(self):
try:
self._canvas.deleteObjectsByTag([self._crosshair_id], redraw=False)
except:
pass
class GingaLayerArtist(LayerArtistBase):
zorder = Pointer('_zorder')
visible = Pointer('_visible')
def __init__(self, layer, canvas):
super(GingaLayerArtist, self).__init__(layer)
self._canvas = canvas
self._visible = True
def redraw(self, whence=0):
self._canvas.redraw(whence=whence)
class GingaImageLayer(GingaLayerArtist, ImageLayerBase):
# unused by Ginga
cmap = None
norm = None
def __init__(self, layer, canvas):
super(GingaImageLayer, self).__init__(layer, canvas)
self._override_image = None
self._tag = "layer%s_%s" % (layer.label, time())
self._img = None # DataImage instance
self._enabled = True
@property
def visible(self):
return self._visible
@visible.setter
def visible(self, value):
if self._visible == value:
return
self._visible = value
if not value:
self.clear()
elif self._img:
self._canvas.set_image(self._img)
@property
def zorder(self):
return self._zorder
@zorder.setter
def zorder(self, value):
self._zorder = value
try:
canvas_img = self._canvas.getObjectByTag('_image')
canvas_img.set_zorder(value)
except KeyError:
# object does not yet exist on canvas
pass
def set_norm(self, **kwargs):
# NOP for ginga
pass
def clear_norm(self):
# NOP for ginga
pass
def override_image(self, image):
"""Temporarily show a different image"""
self._override_image = image
def clear_override(self):
self._override_image = None
def clear(self):
# remove previously added image
try:
self._canvas.deleteObjectsByTag(['_image'], redraw=False)
except:
pass
@property
def enabled(self):
return self._enabled
def update(self, view, transpose=False):
if not self.visible:
return
# update ginga model
comp, view = split_component_view(view)
if self._img is None:
self._img = DataImage(self.layer, comp, view, transpose)
self._canvas.set_image(self._img)
self._img.data = self.layer
self._img.component = comp
self._img.view = view
self._img.transpose = transpose
self._img.override_image = self._override_image
self.redraw()
class GingaSubsetImageLayer(GingaLayerArtist, SubsetImageLayerBase):
def __init__(self, layer, canvas):
super(GingaSubsetImageLayer, self).__init__(layer, canvas)
self._img = None
self._cimg = None
self._tag = "layer%s_%s" % (layer.label, time())
self._enabled = True
@property
def visible(self):
return self._visible
@property
def enabled(self):
return self._enabled
@visible.setter
def visible(self, value):
if value is self._visible:
return
self._visible = value
if not value:
self.clear()
elif self._cimg:
self._canvas.add(self._cimg, tag=self._tag, redraw=True)
@property
def zorder(self):
return self._zorder
@zorder.setter
def zorder(self, value):
self._zorder = value
try:
canvas_img = self._canvas.getObjectByTag(self._tag)
canvas_img.set_zorder(value)
except KeyError:
# object does not yet exist on canvas
pass
def clear(self):
try:
self._canvas.deleteObjectsByTag([self._tag], redraw=True)
except:
pass
def _update_ginga_models(self, view, transpose=False):
subset = self.layer
logging.getLogger(__name__).debug("View into subset %s is %s", self.layer, view)
_, view = split_component_view(view) # discard ComponentID
r, g, b = color2rgb(self.layer.style.color)
if self._img is None:
self._img = SubsetImage(subset, view)
if self._cimg is None:
# SubsetImages can't be added to canvases directly. Need
# to wrap into a ginga canvas type.
Image = self._canvas.getDrawClass('image')
self._cimg = Image(0, 0, self._img, alpha=0.5, flipy=False)
self._img.view = view
self._img.color = (r, g, b)
self._img.transpose = transpose
def _check_enabled(self):
"""
Sync the enabled/disabled status, based on whether
mask is computable
"""
self._enabled = True
try:
# the first pixel
view = tuple(0 for _ in self.layer.data.shape)
self.layer.to_mask(view)
except IncompatibleAttribute as exc:
self._enabled = False
self.disable_invalid_attributes(*exc.args)
return self._enabled
def _ensure_added(self):
""" Add artist to canvas if needed """
try:
self._canvas.getObjectByTag(self._tag)
except KeyError:
self._canvas.add(self._cimg, tag=self._tag, redraw=False)
def update(self, view, transpose=False):
self._check_enabled()
self._update_ginga_models(view, transpose)
if self._enabled and self._visible:
self._ensure_added()
else:
self.clear()
self.redraw(whence=0)
def forbidden(*args):
raise ValueError("Forbidden")
class DataImage(AstroImage.AstroImage):
"""
A Ginga image subclass to interface with Glue Data objects
"""
get_data = _get_data = copy_data = set_data = get_array = transfer = forbidden
def __init__(self, data, component, view, transpose=False,
override_image=None, **kwargs):
"""
Parameters
----------
data : glue.core.data.Data
The data to image
component : glue.core.data.ComponentID
The ComponentID in the data to image
view : numpy-style view
The view into the data to image. Must produce a 2D array
transpose : bool
Whether to transpose the view
override_image : numpy array, optional
An alternative image to show instead of the view into the data.
The override image must have the same shape as the 2D view into
the data.
kwargs : dict
Extra kwargs are passed to the superclass
"""
self.transpose = transpose
self.view = view
self.data = data
self.component = component
self.override_image = None
super(DataImage, self).__init__(**kwargs)
@property
def shape(self):
"""
The shape of the 2D view into the data
"""
result = view_shape(self.data.shape, self.view)
if self.transpose:
result = result[::-1]
return result
def _get_fast_data(self):
return self._slice((slice(None, None, 10), slice(None, None, 10)))
def _slice(self, view):
"""
Extract a view from the 2D image.
"""
if self.override_image is not None:
return self.override_image[view]
# Combining multiple views: First a 2D slice into an ND array, then
# the requested view from this slice
if self.transpose:
views = [self.view, 'transpose', view]
else:
views = [self.view, view]
view = stack_view(self.data.shape, *views)
return self.data[self.component, view]
class SubsetImage(BaseImage.BaseImage):
"""
A Ginga image subclass to interface with Glue subset objects
"""
get_data = _get_data = copy_data = set_data = get_array = transfer = forbidden
def __init__(self, subset, view, color=(0, 1, 0), transpose=False, **kwargs):
"""
Parameters
----------
subset : glue.core.subset.Subset
The subset to image
view : numpy-style view
The view into the subset to image. Must produce a 2D array
color : tuple of 3 floats in range [0, 1]
The color to image the subset as
transpose : bool
Whether to transpose the view
kwargs : dict
Extra kwargs are passed to the ginga superclass
"""
super(SubsetImage, self).__init__(**kwargs)
self.subset = subset
self.view = view
self.transpose = transpose
self.color = color
self.order = 'RGBA'
@property
def shape(self):
"""
Shape of the 2D view into the subset mask
"""
result = view_shape(self.subset.data.shape, self.view)
if self.transpose:
result = result[::-1]
return tuple(list(result) + [4]) # 4th dim is RGBA channels
def _rgb_from_mask(self, mask):
"""
Turn a boolean mask into a 4-channel RGBA image
"""
r, g, b = self.color
ones = mask * 0 + 255
alpha = mask * 127
result = np.dstack((ones * r, ones * g, ones * b, alpha)).astype(np.uint8)
return result
def _get_fast_data(self):
return self._slice((slice(None, None, 10), slice(None, None, 10)))
def _slice(self, view):
"""
Extract a view from the 2D subset mask.
"""
# Combining multiple views: First a 2D slice into an ND array, then
# the requested view from this slice
if self.transpose:
views = [self.view, 'transpose', view]
else:
views = [self.view, view]
view = stack_view(self.subset.data.shape, *views)
mask = self.subset.to_mask(view)
return self._rgb_from_mask(mask)
def _set_minmax(self):
# we already know the data bounds
self.minval = 0
self.maxval = 256
self.minval_noinf = self.minval
self.maxval_noinf = self.maxval
def get_scaled_cutout_wdht(self, x1, y1, x2, y2, new_wd, new_ht):
doit = getattr(self, '_doit', False)
self._doit = not doit
# default implementation if downsampling
if doit or new_wd <= (x2 - x1 + 1) or new_ht <= (y2 - y1 + 1):
return super(SubsetImage, self).get_scaled_cutout_wdht(x1, y1, x2, y2, new_wd, new_ht)
# if upsampling, prevent extra to_mask() computation
x1, x2 = np.clip([x1, x2], 0, self.width - 2).astype(np.int)
y1, y2 = np.clip([y1, y2], 0, self.height - 2).astype(np.int)
result = self._slice(np.s_[y1:y2 + 1, x1:x2 + 1])
yi = np.linspace(0, result.shape[0], new_ht).astype(np.int).reshape(-1, 1).clip(0, result.shape[0] - 1)
xi = np.linspace(0, result.shape[1], new_wd).astype(np.int).reshape(1, -1).clip(0, result.shape[1] - 1)
yi, xi = [np.array(a) for a in np.broadcast_arrays(yi, xi)]
result = result[yi, xi]
scale_x = 1.0 * result.shape[1] / (x2 - x1 + 1)
scale_y = 1.0 * result.shape[0] / (y2 - y1 + 1)
return Bunch.Bunch(data=result, scale_x=scale_x, scale_y=scale_y)
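# --- Illustrative usage sketch (not part of the original module) ---
# Hypothetical wrapping of a glue Data object for display on a Ginga canvas;
# the variable names and the component label below are placeholders.
#
#     img = DataImage(data, data.id['PRIMARY'], view=np.s_[0, :, :])
#     canvas.set_image(img)    # `canvas` is an existing Ginga viewer canvas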
| {
"repo_name": "saimn/glue",
"path": "glue/plugins/ginga_viewer/qt/client.py",
"copies": "1",
"size": "13126",
"license": "bsd-3-clause",
"hash": 1973570190032169700,
"line_mean": 28.8997722096,
"line_max": 111,
"alpha_frac": 0.5815937833,
"autogenerated": false,
"ratio": 3.803535207186323,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48851289904863227,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import logging
import docker.errors
from urllib3.exceptions import TimeoutError
from requests.exceptions import Timeout
from ..decorators import retry
from ..exceptions import ClipperException
logger = logging.getLogger(__name__)
def create_network(docker_client, name):
try:
docker_client.networks.create(
name=name,
check_duplicate=True)
except docker.errors.APIError:
logger.debug(
"{nw} network already exists".format(nw=name))
except ConnectionError:
msg = "Unable to Connect to Docker. Please Check if Docker is running."
raise ClipperException(msg)
# Wait for maximum 5 min.
@retry((docker.errors.NotFound, docker.errors.APIError, ClipperException),
tries=300, delay=1, backoff=1, logger=logger)
def check_container_status(docker_client, name):
state = docker_client.containers.get(name).attrs.get("State")
inspected = docker_client.api.inspect_container(name)
if (state is not None and state.get("Status") == "running") or \
(inspected is not None and inspected.get("State").get("Health").get(
"Status") == "healthy"):
return
else:
msg = "{} container is not running yet or broken. ".format(name) + \
"We will try to run again. Please analyze logs if " + \
"it keeps failing"
raise ClipperException(msg)
@retry((docker.errors.APIError, TimeoutError, Timeout),
tries=5, logger=logger)
def list_containers(docker_client, filters):
return docker_client.containers.list(filters=filters)
@retry((docker.errors.APIError, TimeoutError, Timeout),
tries=5, logger=logger)
def run_container(docker_client, image, cmd=None, name=None, ports=None,
labels=None, environment=None, log_config=None, volumes=None,
user=None, extra_container_kwargs=None):
return docker_client.containers.run(
image,
command=cmd,
name=name,
ports=ports,
labels=labels,
environment=environment,
volumes=volumes,
user=user,
log_config=log_config,
**extra_container_kwargs)
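# --- Illustrative usage sketch (not part of the original module) ---
# Hypothetical use of the retry-wrapped helpers; the image name, container
# name and port mapping are placeholders.
#
#     import docker
#     client = docker.from_env()
#     create_network(client, 'clipper_network')
#     run_container(client, 'nginx:latest', name='example-nginx',
#                   ports={'80/tcp': 8080},
#                   extra_container_kwargs={'detach': True})
#     check_container_status(client, 'example-nginx')
#     list_containers(client, filters={'name': 'example-nginx'})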
| {
"repo_name": "ucbrise/clipper",
"path": "clipper_admin/clipper_admin/docker/docker_api_utils.py",
"copies": "1",
"size": "2242",
"license": "apache-2.0",
"hash": -823434625730076000,
"line_mean": 33.4923076923,
"line_max": 80,
"alpha_frac": 0.6601248885,
"autogenerated": false,
"ratio": 4.025134649910234,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5185259538410234,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import logging
import multiprocessing
import threading
import time
import workflows
import workflows.frontend.utilization
import workflows.services
import workflows.transport
import workflows.util
from workflows.services.common_service import CommonService
try: # Python3 compatibility
basestring = basestring
except NameError:
basestring = (str, bytes)
class Frontend():
'''The frontend class encapsulates the actual service. It controls the
service process and keeps the connection to the transport layer. It
can process control messages directly, or pass messages on to the
service.
'''
def __init__(self, transport=None, service=None,
transport_command_channel=None, restart_service=False,
verbose_service=False, environment=None):
'''Create a frontend instance. Connect to the transport layer, start any
requested service, begin broadcasting status information and listen
for control commands.
:param restart_service:
If the service process dies unexpectedly the frontend should start
a new instance.
:param service:
A class or name of the class to be instantiated in a subprocess as
service.
:param transport:
Either the name of a transport class, a transport class, or a
transport class object.
:param transport_command_channel:
An optional channel of a transport subscription to be listened to for
commands.
:param verbose_service:
If set, run services with increased logging level (DEBUG).
:param environment:
An optional dictionary that is passed to started services.
'''
self.__lock = threading.RLock()
self.__hostid = workflows.util.generate_unique_host_id()
self._service = None # pointer to the service instance
self._service_class_name = None
self._service_factory = None # pointer to the service class
self._service_name = None
self._service_starttime = None
self._service_rapidstarts = None
self._pipe_commands = None # frontend -> service
self._pipe_service = None # frontend <- service
self._service_status = CommonService.SERVICE_STATUS_NONE
self._service_status_announced = CommonService.SERVICE_STATUS_NONE
self.restart_service = restart_service
self.shutdown = False
# Status broadcast related variables
self._status_interval = 6
self._status_last_broadcast = 0
self._status_idle_since = None
self._utilization = workflows.frontend.utilization.UtilizationStatistics(
summation_period=self._status_interval)
# Set up logging
self._verbose_service = verbose_service
class LogAdapter():
'''A helper class that acts like a dictionary, but actually reads its
values from the get_status() function.'''
status_fn = self.get_status
status = status_fn()
def __iter__(self):
'''Update cached status values, renaming the keys for logging.
Return a dictionary key iterator.'''
self.status = { 'workflows_' + k: v \
for k, v in self.status_fn().items() }
return self.status.__iter__()
def __getitem__(self, key):
'''Return a value from the status dictionary.'''
return self.status.__getitem__(key)
self.log = logging.LoggerAdapter(
logging.getLogger('workflows.frontend'),
LogAdapter())
self.log.warn = self.log.warning # Add support for deprecated .warn
# Connect to the network transport layer
if transport is None or isinstance(transport, basestring):
self._transport_factory = workflows.transport.lookup(transport)
else:
self._transport_factory = transport
assert hasattr(self._transport_factory, '__call__'), "No valid transport factory given"
self._transport = self._transport_factory()
assert self._transport.connect(), "Could not connect to transport layer"
if transport_command_channel:
self._transport.subscribe_broadcast(transport_command_channel,
self.process_transport_command)
self.log.debug('Listening for commands on transport layer')
# Save environment for service starts
self._service_environment = environment
# Start initial service if one has been requested
self._service_factory = service
if service is not None:
self.update_status(CommonService.SERVICE_STATUS_NEW)
self.switch_service()
else:
self.update_status()
def update_status(self, status_code=None):
'''Update the service status kept inside the frontend (_service_status).
The status is broadcast over the network immediately. If the status
changes to IDLE then this message is delayed. The IDLE status is only
broadcast if it is held for over 0.5 seconds.
When the status does not change it is still broadcast every
_status_interval seconds.
:param status_code: Either an integer describing the service status
(see workflows.services.common_service), or None
if the status is unchanged.
'''
if status_code is not None:
self._service_status = status_code
self._utilization.update_status(status_code)
# Check whether IDLE status should be delayed
if self._service_status == CommonService.SERVICE_STATUS_IDLE:
if self._status_idle_since is None:
self._status_idle_since = time.time()
return
elif self._status_idle_since + 0.5 > time.time():
return
else:
self._status_idle_since = None
new_status = self._service_status != self._service_status_announced
if (new_status or self._status_last_broadcast + self._status_interval <= time.time()) \
and self._transport and self._transport.is_connected():
self._service_status_announced = self._service_status
self._transport.broadcast_status(self.get_status())
self._status_last_broadcast = time.time()
def run(self):
'''The main loop of the frontend. Here incoming messages from the service
are processed and forwarded to the corresponding callback methods.'''
self.log.debug("Entered main loop")
while not self.shutdown:
# If no service is running slow down the main loop
if not self._pipe_service:
time.sleep(0.3)
self.update_status()
# While a service is running, check for incoming messages from that service
if self._pipe_service and self._pipe_service.poll(1):
try:
message = self._pipe_service.recv()
if isinstance(message, dict) and 'band' in message:
# only dictionaries with 'band' entry are valid messages
try:
handler = getattr(self, 'parse_band_' + message['band'])
except AttributeError:
handler = None
self.log.warning("Unknown band %s", str(message['band']))
if handler:
# try:
handler(message)
# except Exception:
# print('Uh oh. What to do.')
else:
self.log.warning("Invalid message received %s", str(message))
except EOFError:
# Service has gone away
error_message = False
if self._service_status == CommonService.SERVICE_STATUS_END:
self.log.info("Service terminated")
elif self._service_status == CommonService.SERVICE_STATUS_ERROR:
error_message = "Service terminated with error code"
elif self._service_status in (CommonService.SERVICE_STATUS_NONE,
CommonService.SERVICE_STATUS_NEW,
CommonService.SERVICE_STATUS_STARTING):
error_message = 'Service may have died unexpectedly in ' \
+ 'initialization (last known status: %s)' \
% CommonService.human_readable_state.get( \
self._service_status, self._service_status)
else:
error_message = "Service may have died unexpectedly" \
" (last known status: %s)" \
% CommonService.human_readable_state.get( \
self._service_status, self._service_status)
if error_message:
self.log.error(error_message)
self._terminate_service()
if self.restart_service:
self.exponential_backoff()
else:
self.shutdown = True
if error_message:
raise workflows.Error(error_message)
with self.__lock:
if self._service is None and self.restart_service and self._service_factory:
self.update_status(status_code=CommonService.SERVICE_STATUS_NEW)
self.switch_service()
# Check that the transport is alive
if not self._transport.is_connected():
self._terminate_service()
raise workflows.Error('Lost transport layer connection')
self.log.debug("Left main loop")
self.update_status(status_code=CommonService.SERVICE_STATUS_TEARDOWN)
self._terminate_service()
self.log.debug("Terminating.")
def send_command(self, command):
'''Send command to service via the command queue.'''
if self._pipe_commands:
self._pipe_commands.send(command)
else:
if self.shutdown:
# Stop delivering messages in shutdown.
self.log.info('During shutdown no command queue pipe found for command\n%s', str(command))
else:
self.log.error('No command queue pipe found for command\n%s', str(command))
def process_transport_command(self, header, message):
'''Parse a command coming in through the transport command subscription'''
if not isinstance(message, dict):
return
relevant = False
if 'host' in message: # Filter by host
if message['host'] != self.__hostid: return
relevant = True
if 'service' in message: # Filter by service
if message['service'] != self._service_class_name: return
relevant = True
if not relevant: # Ignore message unless at least one filter matches
return
if message.get('command'):
self.log.info('Received command \'%s\' via transport layer', message['command'])
if message['command'] == 'shutdown':
self.shutdown = True
else:
self.log.warning('Received invalid transport command message')
def parse_band_log(self, message):
'''Process incoming logging messages from the service.'''
if 'payload' in message and hasattr(message['payload'], 'name'):
record = message['payload']
for k in dir(record):
if k.startswith('workflows_exc_'):
setattr(record, k[14:], getattr(record, k))
delattr(record, k)
for k, v in self.get_status().items():
setattr(record, 'workflows_' + k, v)
logging.getLogger(record.name).handle(record)
else:
self.log.warning("Received broken record on log band\n" + \
"Message: %s\nRecord: %s",
str(message),
str(hasattr(message.get('payload'), '__dict__') and message['payload'].__dict__))
def parse_band_set_name(self, message):
'''Process incoming message indicating service name change.'''
if message.get('name'):
self._service_name = message['name']
else:
self.log.warning("Received broken record on set_name band\nMessage: %s",
str(message))
def parse_band_status_update(self, message):
'''Process incoming status updates from the service.'''
self.log.debug("Status update: " + str(message))
self.update_status(status_code=message['statuscode'])
def get_host_id(self):
'''Get a cached copy of the host id.'''
return self.__hostid
def get_status(self):
'''Returns a dictionary containing all relevant status information to be
broadcast across the network.'''
return { 'host': self.__hostid,
'status': self._service_status_announced,
'statustext': CommonService.human_readable_state.get(self._service_status_announced),
'service': self._service_name,
'serviceclass': self._service_class_name,
'utilization': self._utilization.report(),
'workflows': workflows.version(),
}
def exponential_backoff(self):
'''A function that keeps waiting longer and longer the more rapidly it is called.
It can be used to increasingly slow down service starts when they keep failing.'''
last_service_switch = self._service_starttime
if not last_service_switch:
return
time_since_last_switch = time.time() - last_service_switch
if not self._service_rapidstarts:
self._service_rapidstarts = 0
minimum_wait = 0.1 * (2 ** self._service_rapidstarts)
minimum_wait = min(5, minimum_wait)
if time_since_last_switch > 10:
self._service_rapidstarts = 0
return
self._service_rapidstarts += 1
self.log.debug("Slowing down service starts (%.1f seconds)", minimum_wait)
time.sleep(minimum_wait)
def switch_service(self, new_service=None):
'''Start a new service in a subprocess.
:param new_service: Either a service name or a service class. If not set,
start up a new instance of the previous class
:return: True on success, False on failure.
'''
if new_service:
self._service_factory = new_service
with self.__lock:
# Terminate existing service if necessary
if self._service is not None:
self._terminate_service()
# Find service class if necessary
if isinstance(self._service_factory, basestring):
self._service_factory = workflows.services.lookup(self._service_factory)
if not self._service_factory:
return False
# Set up new service object
service_instance = self._service_factory(
environment=self._service_environment,
)
# Set up pipes and connect service object
svc_commands, self._pipe_commands = multiprocessing.Pipe(False)
self._pipe_service, svc_tofrontend = multiprocessing.Pipe(False)
service_instance.connect(
commands=svc_commands,
frontend=svc_tofrontend,
)
# Set up transport layer for new service
service_instance.transport = self._transport_factory()
# Start new service in a separate process
self._service = multiprocessing.Process(
target=service_instance.start, args=(),
kwargs={'verbose_log': self._verbose_service})
self._service_name = service_instance.get_name()
self._service_class_name = service_instance.__class__.__name__
self._service.daemon = True
self._service.name = 'workflows-service'
self._service.start()
self._service_starttime = time.time()
# Starting the process copies all file descriptors.
# At this point (and no sooner!) the passed pipe objects must be closed
# in this process here.
svc_commands.close()
svc_tofrontend.close()
self.log.info("Started service: %s", self._service_name)
return True
def _terminate_service(self):
'''Force termination of running service.
Disconnect queues, end queue feeder threads.
Wait for service process to clear, drop all references.'''
with self.__lock:
if self._service:
self._service.terminate()
if self._pipe_commands:
self._pipe_commands.close()
if self._pipe_service:
self._pipe_service.close()
self._pipe_commands = None
self._pipe_service = None
self._service_class_name = None
self._service_name = None
if self._service_status != CommonService.SERVICE_STATUS_TEARDOWN:
self.update_status(status_code=CommonService.SERVICE_STATUS_END)
if self._service:
self._service.join() # must wait for process to be actually destroyed
self._service = None
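# --- Illustrative usage sketch (not part of the original module) ---
# Hypothetical startup of a frontend; the transport and service names are
# placeholders and depend on what is installed/registered.
#
#     fe = Frontend(transport='StompTransport', service='SampleProducer',
#                   restart_service=True)
#     try:
#         fe.run()             # blocks; broadcasts status, relays commands
#     except KeyboardInterrupt:
#         fe.shutdown = True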
| {
"repo_name": "xia2/workflows",
"path": "workflows/frontend/__init__.py",
"copies": "1",
"size": "16220",
"license": "bsd-3-clause",
"hash": 1695671499647257000,
"line_mean": 40.0632911392,
"line_max": 101,
"alpha_frac": 0.6411837238,
"autogenerated": false,
"ratio": 4.43048347446053,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.557166719826053,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import logging
import numpy
import xarray
from itertools import groupby
from collections import defaultdict, OrderedDict
from ..model import GeoBox
from ..utils import check_intersect
from .query import Query, query_group_by
from .core import Datacube, get_measurements
_LOG = logging.getLogger(__name__)
class GridWorkflow(object):
"""
GridWorkflow deals with cell- and tile-based processing using a grid defining a projection and resolution.
"""
def __init__(self, index, grid_spec=None, product=None):
"""
Create a grid workflow tool.
Either grid_spec or product must be supplied.
:param Index index: The database index to use.
:param GridSpec grid_spec: The grid projection and resolution
:param str product: The name of an existing product, if no grid_spec is supplied.
"""
self.index = index
if grid_spec is None:
product = self.index.products.get_by_name(product)
grid_spec = product and product.grid_spec
self.grid_spec = grid_spec
def cell_observations(self, cell_index=None, geopolygon=None, **indexers):
"""
List datasets, grouped by cell.
:param (int,int) cell_index: The cell index. E.g. (14, -40)
:param indexers: Query to match the datasets, see :py:class:`datacube.api.query.Query`
:return: Datasets grouped by cell index
:rtype: dict[(int,int), list[:py:class:`datacube.model.Dataset`]]
.. seealso::
:meth:`datacube.Datacube.product_observations`
:class:`datacube.api.query.Query`
"""
if cell_index:
assert isinstance(cell_index, tuple)
assert len(cell_index) == 2
geobox = GeoBox.from_grid_spec(self.grid_spec, cell_index)
geopolygon = geobox.extent
query = Query(index=self.index, geopolygon=geopolygon, **indexers)
if not query.product:
raise RuntimeError('must specify a product')
observations = self.index.datasets.search_eager(**query.search_terms)
if not observations:
return {}
tiles = {}
if cell_index:
tile_geopolygon = geobox.extent
datasets = [dataset for dataset in observations
if check_intersect(tile_geopolygon, dataset.extent.to_crs(self.grid_spec.crs))]
tiles[cell_index] = {
'datasets': datasets,
'geobox': geobox
}
else:
for dataset in observations:
dataset_extent = dataset.extent.to_crs(self.grid_spec.crs)
for tile_index, tile_geobox in self.grid_spec.tiles(dataset_extent.boundingbox):
if check_intersect(tile_geobox.extent, dataset_extent):
tiles.setdefault(tile_index,
{'datasets': [],
'geobox': tile_geobox})['datasets'].append(dataset)
return tiles
@staticmethod
def cell_sources(observations, group_by):
"""
Group observations into sources
:param observations: datasets grouped by cell index, like from :meth:`datacube.GridWorkflow.cell_observations`
:param str group_by: grouping method, one of "time", "solar_day"
:return: sources grouped by cell index
:rtype: dict[(int,int), :py:class:`xarray.DataArray`]
.. seealso::
:meth:`load`
:meth:`datacube.Datacube.product_sources`
"""
stack = defaultdict(dict)
for cell_index, observation in observations.items():
sources = Datacube.product_sources(observation['datasets'],
group_func=group_by.group_by_func,
dimension=group_by.dimension,
units=group_by.units)
stack[cell_index] = {
'sources': sources,
'geobox': observation['geobox']
}
return stack
@staticmethod
def tile_sources(observations, group_by):
"""
Split observations into tiles and group into sources
:param observations: datasets grouped by cell index, like from :meth:`datacube.GridWorkflow.cell_observations`
:param str group_by: grouping method, one of "time", "solar_day"
:return: sources grouped by cell index and time
:rtype: dict[tuple(int, int, numpy.datetime64), :py:class:`xarray.DataArray`]
.. seealso::
:meth:`load`
:meth:`datacube.Datacube.product_sources`
"""
stack = defaultdict(dict)
for cell_index, observation in observations.items():
observation['datasets'].sort(key=group_by.group_by_func)
groups = [(key, tuple(group)) for key, group in groupby(observation['datasets'], group_by.group_by_func)]
for key, datasets in groups:
data = numpy.empty(1, dtype=object)
data[0] = datasets
variable = xarray.Variable((group_by.dimension,), data,
fastpath=True)
coord = xarray.Variable((group_by.dimension,),
numpy.array([key], dtype='datetime64[ns]'),
attrs={'units': group_by.units},
fastpath=True)
coords = OrderedDict([(group_by.dimension, coord)])
sources = xarray.DataArray(variable, coords=coords, fastpath=True)
stack[cell_index + (coord.values[0],)] = {
'sources': sources,
'geobox': observation['geobox']
} # TODO: Should be a Tile Obj
return stack
def list_cells(self, cell_index=None, **query):
"""
List cells that match the query.
Cells are included if they contain any datasets that match the query using the same format as
:meth:`datacube.Datacube.load`.
E.g.::
gw.list_cells(product_type='nbar',
platform=['LANDSAT_5', 'LANDSAT_7', 'LANDSAT_8'],
time=('2001-1-1 00:00:00', '2001-3-31 23:59:59'))
:param (int,int) cell_index: The cell index. E.g. (14, -40)
:param query: see :py:class:`datacube.api.query.Query`
:rtype: dict[(int, int), Cell]
"""
observations = self.cell_observations(cell_index, **query)
return self.cell_sources(observations, query_group_by(**query))
def list_tiles(self, cell_index=None, **query):
"""
List tiles of data, sorted by cell.
::
tiles = gw.list_tiles(product_type=['nbar', 'pq'], platform='LANDSAT_5')
The values can be passed to :meth:`load`
:param (int,int) cell_index: The cell index. E.g. (14, -40)
:param query: see :py:class:`datacube.api.query.Query`
:rtype: dict[(int, int, numpy.datetime64), Tile]
.. seealso:: :meth:`load`
"""
observations = self.cell_observations(cell_index, **query)
return self.tile_sources(observations, query_group_by(**query))
@staticmethod
def load(tile, measurements=None, chunk=None, dask_chunks=None, fuse_func=None):
"""
Load data for a cell/tile.
The data to be loaded is defined by the output of :meth:`list_tiles`.
See the documentation on using `xarray with dask <http://xarray.pydata.org/en/stable/dask.html>`_
for more information.
:param tile: The tile to load.
:param measurements: The name or list of names of measurements to load
:param dict chunk: Load a chunk of a cell/tile. Specify the slice in each output dimension.
:param dict dask_chunks: If the data should be loaded as needed using :py:class:`dask.array.Array`,
specify the chunk size in each output direction.
See the documentation on using `xarray with dask <http://xarray.pydata.org/en/stable/dask.html>`_
for more information.
:param fuse_func: Function to fuse together a tile that has been pre-grouped by calling
:meth:`list_cells` with a ``group_by`` parameter.
:return: The requested data.
:rtype: :py:class:`xarray.Dataset`
.. seealso::
:meth:`list_tiles` :meth:`list_cells`
"""
sources = tile['sources']
geobox = tile['geobox']
if chunk:
assert not set(chunk.keys()) - set(sources.dims+geobox.dimensions), 'bad dimensions'
sources = sources[tuple(chunk.get(dim, slice(None)) for dim in sources.dims)]
geobox = geobox[tuple(chunk.get(dim, slice(None)) for dim in geobox.dimensions)]
observations = []
for dataset in sources.values:
observations += dataset
all_measurements = get_measurements(observations)
if measurements:
measurements = [all_measurements[measurement] for measurement in measurements
if measurement in all_measurements]
else:
measurements = [measurement for measurement in all_measurements.values()]
dataset = Datacube.product_data(sources, geobox, measurements, dask_chunks=dask_chunks,
fuse_func=fuse_func)
return dataset
def __str__(self):
return "GridWorkflow<index={!r},\n\tgridspec={!r}>".format(self.index, self.grid_spec)
def __repr__(self):
return self.__str__()
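# --- Illustrative usage sketch (not part of the original module) ---
# Hypothetical cell-based load; the product name, cell index and measurement
# names are placeholders that depend on what is indexed in the datacube.
#
#     gw = GridWorkflow(index, product='ls5_nbar_albers')
#     cells = gw.list_cells(product='ls5_nbar_albers',
#                           time=('2001-01-01', '2001-03-31'))
#     data = GridWorkflow.load(cells[(15, -40)], measurements=['red', 'nir'])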
| {
"repo_name": "ceos-seo/Data_Cube_v2",
"path": "agdc-v2/datacube/api/grid_workflow.py",
"copies": "1",
"size": "9765",
"license": "apache-2.0",
"hash": -4582628576569607000,
"line_mean": 38.6951219512,
"line_max": 118,
"alpha_frac": 0.5819764465,
"autogenerated": false,
"ratio": 4.2036160137752905,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.528559246027529,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import logging
import operator
import numbers
import numpy as np
from .util import join_component_view
from .subset import InequalitySubsetState
from .contracts import contract, ContractsMeta
from ..external.six import add_metaclass
__all__ = ['ComponentLink', 'BinaryComponentLink']
def identity(x):
return x
OPSYM = {operator.add: '+', operator.sub: '-',
operator.truediv: '/', operator.mul: '*',
operator.pow: '**'}
@add_metaclass(ContractsMeta)
class ComponentLink(object):
""" ComponentLinks represent transformation logic between ComponentIDs
ComponentLinks are used to derive one
:class:`~glue.core.data.ComponentID` from another:
Example::
def hours_to_minutes(hours):
return hours * 60
d = Data(hour=[1, 2, 3])
hour = d.id['hour']
minute = ComponentID('minute')
link = ComponentLink( [hour], minute, using=hours_to_minutes)
link.compute(d) # array([ 60, 120, 180])
d.add_component_link(link)
d['minute'] # array([ 60, 120, 180])
"""
@contract(using='callable|None',
inverse='callable|None')
def __init__(self, comp_from, comp_to, using=None, inverse=None):
"""
:param comp_from: The input ComponentIDs
:type comp_from: list of :class:`~glue.core.data.ComponentID`
:param comp_to: The target component ID
:type comp_to: :class:`~glue.core.data.ComponentID`
:param using: The translation function which maps data from
comp_from to comp_to (optional)
The using function should satisfy::
using(data[comp_from[0]],...,data[comp_from[-1]]) = desired data
:param inverse:
The inverse translation function, if exists (optional)
:raises:
TypeError if input is invalid
.. note ::
Both ``inverse`` and ``using`` should accept and return
numpy arrays
"""
from .data import ComponentID
self._from = comp_from
self._to = comp_to
if using is None:
using = identity
self._using = using
self._inverse = inverse
self.hidden = False # show in widgets?
self.identity = self._using is identity
if type(comp_from) is not list:
raise TypeError("comp_from must be a list: %s" % type(comp_from))
if not all(isinstance(f, ComponentID) for f in self._from):
raise TypeError("from argument is not a list of ComponentIDs: %s" %
self._from)
if not isinstance(self._to, ComponentID):
raise TypeError("to argument is not a ComponentID: %s" %
type(self._to))
if using is identity:
if len(comp_from) != 1:
raise TypeError("comp_from must have only 1 element, "
"or a 'using' function must be provided")
@contract(data='isinstance(Data)', view='array_view')
def compute(self, data, view=None):
"""For a given data set, compute the component comp_to given
the data associated with each comp_from and the ``using``
function
:param data: The data set to use
:param view: Optional view (e.g. slice) through the data to use
*Returns*:
The data associated with comp_to component
*Raises*:
InvalidAttribute, if the data set doesn't have all the
ComponentIDs needed for the transformation
"""
logger = logging.getLogger(__name__)
args = [data[join_component_view(f, view)] for f in self._from]
logger.debug("shape of first argument: %s", args[0].shape)
result = self._using(*args)
logger.debug("shape of result: %s", result.shape)
if result.shape != args[0].shape:
logger.warn("ComponentLink function %s changed shape. Fixing",
self._using.__name__)
result.shape = args[0].shape
return result
def get_from_ids(self):
""" The list of input ComponentIDs """
return self._from
@contract(old='isinstance(ComponentID)', new='isinstance(ComponentID)')
def replace_ids(self, old, new):
"""Replace all references to an old ComponentID with references
to new
:param old: ComponentID to replace
:param new: ComponentID to replace with
"""
for i, f in enumerate(self._from):
if f is old:
self._from[i] = new
if self._to is old:
self._to = new
@contract(_from='list(isinstance(ComponentID))')
def set_from_ids(self, _from):
if len(_from) != len(self._from):
raise ValueError("New ID list has the wrong length.")
self._from = _from
def get_to_id(self):
""" The target ComponentID """
return self._to
def set_to_id(self, to):
self._to = to
def get_using(self):
""" The transformation function """
return self._using
def get_inverse(self):
""" The inverse transformation, or None """
return self._inverse
def __str__(self):
args = ", ".join([t.label for t in self._from])
if self._using is not identity:
result = "%s <- %s(%s)" % (self._to, self._using.__name__, args)
else:
result = "%s <-> %s" % (self._to, self._from)
return result
def __repr__(self):
return str(self)
@contract(other='isinstance(ComponentID)|component_like|float|int')
def __add__(self, other):
return BinaryComponentLink(self, other, operator.add)
@contract(other='isinstance(ComponentID)|component_like|float|int')
def __radd__(self, other):
return BinaryComponentLink(other, self, operator.add)
@contract(other='isinstance(ComponentID)|component_like|float|int')
def __sub__(self, other):
return BinaryComponentLink(self, other, operator.sub)
@contract(other='isinstance(ComponentID)|component_like|float|int')
def __rsub__(self, other):
return BinaryComponentLink(other, self, operator.sub)
@contract(other='isinstance(ComponentID)|component_like|float|int')
def __mul__(self, other):
return BinaryComponentLink(self, other, operator.mul)
@contract(other='isinstance(ComponentID)|component_like|float|int')
def __rmul__(self, other):
return BinaryComponentLink(other, self, operator.mul)
@contract(other='isinstance(ComponentID)|component_like|float|int')
def __div__(self, other):
return BinaryComponentLink(self, other, operator.div)
@contract(other='isinstance(ComponentID)|component_like|float|int')
def __rdiv__(self, other):
return BinaryComponentLink(other, self, operator.div)
@contract(other='isinstance(ComponentID)|component_like|float|int')
def __truediv__(self, other):
return BinaryComponentLink(self, other, operator.truediv)
@contract(other='isinstance(ComponentID)|component_like|float|int')
def __rtruediv__(self, other):
return BinaryComponentLink(other, self, operator.truediv)
@contract(other='isinstance(ComponentID)|component_like|float|int')
def __pow__(self, other):
return BinaryComponentLink(self, other, operator.pow)
@contract(other='isinstance(ComponentID)|component_like|float|int')
def __rpow__(self, other):
return BinaryComponentLink(other, self, operator.pow)
@contract(other='isinstance(ComponentID)|component_like|float|int')
def __lt__(self, other):
return InequalitySubsetState(self, other, operator.lt)
@contract(other='isinstance(ComponentID)|component_like|float|int')
def __le__(self, other):
return InequalitySubsetState(self, other, operator.le)
@contract(other='isinstance(ComponentID)|component_like|float|int')
def __gt__(self, other):
return InequalitySubsetState(self, other, operator.gt)
@contract(other='isinstance(ComponentID)|component_like|float|int')
def __ge__(self, other):
return InequalitySubsetState(self, other, operator.ge)
class CoordinateComponentLink(ComponentLink):
@contract(comp_from='list(isinstance(ComponentID))',
comp_to='isinstance(ComponentID)',
coords='isinstance(Coordinates)',
index=int,
pixel2world=bool)
def __init__(self, comp_from, comp_to, coords, index, pixel2world=True):
self.coords = coords
self.index = index
self.pixel2world = pixel2world
# Some coords don't need all pixel coords
# to compute a given world coord, and vice versa
# (e.g., spectral data cubes)
self.ndim = len(comp_from)
self.from_needed = coords.dependent_axes(index)
self._from_all = comp_from
comp_from = [comp_from[i] for i in self.from_needed]
super(CoordinateComponentLink, self).__init__(
comp_from, comp_to, self.using)
self.hidden = True
def using(self, *args):
attr = 'pixel2world' if self.pixel2world else 'world2pixel'
func = getattr(self.coords, attr)
args2 = [None] * self.ndim
for f, a in zip(self.from_needed, args):
args2[f] = a
for i in range(self.ndim):
if args2[i] is None:
args2[i] = np.zeros_like(args[0])
args2 = tuple(args2)
return func(*args2[::-1])[::-1][self.index]
def __str__(self):
rep = 'pix2world' if self.pixel2world else 'world2pix'
sup = super(CoordinateComponentLink, self).__str__()
return sup.replace('using', rep)
class BinaryComponentLink(ComponentLink):
"""
A ComponentLink that combines two inputs with a binary function
:param left: The first input argument.
ComponentID, ComponentLink, or number
:param right: The second input argument.
ComponentID, ComponentLink, or number
:param op: A function with two inputs that works on numpy arrays
The ComponentLink represents the logic of applying `op` to the
data associated with the inputs `left` and `right`.
"""
def __init__(self, left, right, op):
from .data import ComponentID
self._left = left
self._right = right
self._op = op
from_ = []
if isinstance(left, ComponentID):
from_.append(left)
elif isinstance(left, ComponentLink):
from_.extend(left.get_from_ids())
elif not isinstance(left, numbers.Number):
raise TypeError("Cannot create BinaryComponentLink using %s" %
left)
if isinstance(right, ComponentID):
from_.append(right)
elif isinstance(right, ComponentLink):
from_.extend(right.get_from_ids())
elif not isinstance(right, numbers.Number):
raise TypeError("Cannot create BinaryComponentLink using %s" %
right)
to = ComponentID("")
null = lambda *args: None
super(BinaryComponentLink, self).__init__(from_, to, null)
def replace_ids(self, old, new):
super(BinaryComponentLink, self).replace_ids(old, new)
if self._left is old:
self._left = new
elif isinstance(self._left, ComponentLink):
self._left.replace_ids(old, new)
if self._right is old:
self._right = new
elif isinstance(self._right, ComponentLink):
self._right.replace_ids(old, new)
def compute(self, data, view=None):
l = self._left
r = self._right
if not isinstance(self._left, numbers.Number):
l = data[self._left, view]
if not isinstance(self._right, numbers.Number):
r = data[self._right, view]
return self._op(l, r)
def __str__(self):
sym = OPSYM.get(self._op, self._op.__name__)
return '(%s %s %s)' % (self._left, sym, self._right)
def __repr__(self):
return "<BinaryComponentLink: %s>" % self
| {
"repo_name": "JudoWill/glue",
"path": "glue/core/component_link.py",
"copies": "1",
"size": "12236",
"license": "bsd-3-clause",
"hash": -8295404549606741000,
"line_mean": 32.8011049724,
"line_max": 79,
"alpha_frac": 0.6052631579,
"autogenerated": false,
"ratio": 4.0637661906343405,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.516902934853434,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import logging
import operator
import warnings
import numpy as np
import pandas as pd
from glue.core.subset import (RoiSubsetState, RangeSubsetState,
CategoricalROISubsetState, AndState,
CategoricalMultiRangeSubsetState,
CategoricalROISubsetState2D)
from glue.core.roi import (PolygonalROI, CategoricalROI, RangeROI, XRangeROI,
YRangeROI, RectangularROI)
from glue.core.util import row_lookup
from glue.utils import (unique, shape_to_string, coerce_numeric, check_sorted,
polygon_line_intersections, broadcast_to)
__all__ = ['Component', 'DerivedComponent', 'CategoricalComponent',
'CoordinateComponent']
class Component(object):
""" Stores the actual, numerical information for a particular quantity
Data objects hold one or more components, accessed via
ComponentIDs. All Components in a data set must have the same
shape and number of dimensions
Notes
-----
Instead of instantiating Components directly, consider using
:meth:`Component.autotyped`, which chooses a subclass most appropriate
for the data type.
"""
def __init__(self, data, units=None):
"""
:param data: The data to store
:type data: :class:`numpy.ndarray`
:param units: Optional unit label
:type units: str
"""
# The physical units of the data
self.units = units
# The actual data
# subclasses may pass non-arrays here as placeholders.
if isinstance(data, np.ndarray):
data = coerce_numeric(data)
data.setflags(write=False) # data is read-only
self._data = data
@property
def units(self):
return self._units
@units.setter
def units(self, value):
if value is None:
self._units = ''
else:
self._units = str(value)
@property
def hidden(self):
"""Whether the Component is hidden by default"""
return False
@property
def data(self):
""" The underlying :class:`numpy.ndarray` """
return self._data
@property
def shape(self):
""" Tuple of array dimensions """
return self._data.shape
@property
def ndim(self):
""" The number of dimensions """
return len(self._data.shape)
def __getitem__(self, key):
logging.debug("Using %s to index data of shape %s", key, self.shape)
return self._data[key]
@property
def numeric(self):
"""
Whether or not the datatype is numeric
"""
# We need to be careful here to not just access self.data since that
# would force the computation of the whole component in the case of
# derived components, so instead we specifically only get the first
# element.
return np.can_cast(self[(0,) * self.ndim].dtype, np.complex)
@property
def categorical(self):
"""
Whether or not the datatype is categorical
"""
return False
def __str__(self):
return "%s with shape %s" % (self.__class__.__name__, shape_to_string(self.shape))
def jitter(self, method=None):
raise NotImplementedError
def subset_from_roi(self, att, roi, other_comp=None, other_att=None, coord='x'):
"""
Create a SubsetState object from an ROI.
This encapsulates the logic for creating subset states with Components.
See the documentation for CategoricalComponents for caveats involved
with mixed-type plots.
:param att: attribute name of this Component
:param roi: an ROI object
:param other_comp: The other Component for 2D ROIs
:param other_att: The attribute name of the other Component
:param coord: The orientation of this Component
:return: A SubsetState (or subclass) object
"""
if coord not in ('x', 'y'):
raise ValueError('coord should be one of x/y')
other_coord = 'y' if coord == 'x' else 'x'
if isinstance(roi, RangeROI):
# The selection is either an x range or a y range
if roi.ori == coord:
# The selection applies to the current component
lo, hi = roi.range()
subset_state = RangeSubsetState(lo, hi, att)
else:
# The selection applies to the other component, so we delegate
return other_comp.subset_from_roi(other_att, roi,
other_comp=self,
other_att=att,
coord=other_coord)
else:
# The selection is polygon-like. Categorical components require
# special care, so if the other component is categorical, we need to
# delegate to CategoricalComponent.subset_from_roi.
if isinstance(other_comp, CategoricalComponent):
return other_comp.subset_from_roi(other_att, roi,
other_comp=self,
other_att=att,
is_nested=True,
coord=other_coord)
else:
subset_state = RoiSubsetState()
subset_state.xatt = att
subset_state.yatt = other_att
x, y = roi.to_polygon()
subset_state.roi = PolygonalROI(x, y)
return subset_state
def to_series(self, **kwargs):
""" Convert into a pandas.Series object.
:param kwargs: All kwargs are passed to the Series constructor.
:return: pandas.Series
"""
return pd.Series(self.data.ravel(), **kwargs)
@classmethod
def autotyped(cls, data, units=None):
"""
Automatically choose between Component and CategoricalComponent,
based on the input data type.
:param data: The data to pack into a Component (array-like)
:param units: Optional units
:type units: str
:returns: A Component (or subclass)
"""
data = np.asarray(data)
if np.issubdtype(data.dtype, np.object_):
return CategoricalComponent(data, units=units)
n = coerce_numeric(data)
thresh = 0.5
try:
use_categorical = np.issubdtype(data.dtype, np.character) and \
np.isfinite(n).mean() <= thresh
except TypeError: # isfinite not supported. non-numeric dtype
use_categorical = True
if use_categorical:
return CategoricalComponent(data, units=units)
else:
return Component(n, units=units)
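# Illustrative usage sketch (not part of the original module): Component.autotyped
# picks a Component subclass from the input dtype. The example arrays are made up.
def _autotyped_example():  # pragma: no cover
    numeric = Component.autotyped(np.array([1.0, 2.0, 3.0]))
    labels = Component.autotyped(np.array(['spiral', 'elliptical', 'spiral']))
    # `numeric` is a plain Component; `labels` becomes a CategoricalComponent
    # because the string values cannot be coerced to (mostly) finite numbers.
    return type(numeric).__name__, type(labels).__name__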
class DerivedComponent(Component):
""" A component which derives its data from a function """
def __init__(self, data, link, units=None):
"""
:param data: The data object to use for calculation
:type data: :class:`~glue.core.data.Data`
:param link: The link that carries out the function
:type link: :class:`~glue.core.component_link.ComponentLink`
:param units: Optional unit description
"""
super(DerivedComponent, self).__init__(data, units=units)
self._link = link
def set_parent(self, data):
""" Reassign the Data object that this DerivedComponent operates on """
self._data = data
@property
def hidden(self):
return self._link.hidden
@property
def data(self):
""" Return the numerical data as a numpy array """
return self._link.compute(self._data)
@property
def link(self):
""" Return the component link """
return self._link
def __getitem__(self, key):
return self._link.compute(self._data, key)
class CoordinateComponent(Component):
"""
Components associated with pixel or world coordinates
The numerical values are computed on the fly.
"""
def __init__(self, data, axis, world=False):
super(CoordinateComponent, self).__init__(None, None)
self.world = world
self._data = data
self.axis = axis
@property
def data(self):
return self._calculate()
def _calculate(self, view=None):
if self.world:
# Calculating the world coordinates can be a bottleneck if we aren't
# careful, so we need to make sure that if not all dimensions depend
# on each other, we use smart broadcasting.
# The unoptimized way to do this for an N-dimensional dataset would
# be to construct N-dimensional arrays of pixel values for each
# coordinate. However, if we are computing the coordinates for axis
# i, and axis i is not dependent on any other axis, then the result
# will be an N-dimensional array where the same 1D array of
# coordinates will be repeated over and over.
# To optimize this, we therefore essentially consider only the
            # dependent dimensions and then broadcast the result to the full
# array size at the very end.
# view=None actually adds a dimension which is never what we really
# mean, at least in glue.
if view is None:
view = Ellipsis
# For 1D arrays, slice can be given as a single slice but we need
# to wrap it in a list to make the following code work correctly,
# as it is then consistent with higher-dimensional cases.
if isinstance(view, slice) or np.isscalar(view):
view = [view]
# Some views, e.g. with lists of integer arrays, can give arbitrarily
# complex (copied) subsets of arrays, so in this case we don't do any
# optimization
if view is Ellipsis:
optimize_view = False
else:
for v in view:
if not np.isscalar(v) and not isinstance(v, slice):
optimize_view = False
break
else:
optimize_view = True
pix_coords = []
dep_coords = self._data.coords.dependent_axes(self.axis)
final_slice = []
final_shape = []
for i in range(self._data.ndim):
if optimize_view and i < len(view) and np.isscalar(view[i]):
final_slice.append(0)
else:
final_slice.append(slice(None))
# We set up a 1D pixel axis along that dimension.
pix_coord = np.arange(self._data.shape[i])
# If a view was specified, we need to take it into account for
# that axis.
if optimize_view and i < len(view):
pix_coord = pix_coord[view[i]]
if not np.isscalar(view[i]):
final_shape.append(len(pix_coord))
else:
final_shape.append(self._data.shape[i])
if i not in dep_coords:
# The axis is not dependent on this instance's axis, so we
# just compute the values once and broadcast along this
# dimension later.
pix_coord = 0
pix_coords.append(pix_coord)
# We build the list of N arrays, one for each pixel coordinate
pix_coords = np.meshgrid(*pix_coords, indexing='ij', copy=False)
# Finally we convert these to world coordinates
axis = self._data.ndim - 1 - self.axis
world_coords = self._data.coords.pixel2world_single_axis(*pix_coords[::-1],
axis=axis)
# We get rid of any dimension for which using the view should get
# rid of that dimension.
if optimize_view:
world_coords = world_coords[tuple(final_slice)]
# We then broadcast the final array back to what it should be
world_coords = broadcast_to(world_coords, tuple(final_shape))
# We apply the view if we weren't able to optimize before
if optimize_view:
return world_coords
else:
return world_coords[view]
else:
slices = [slice(0, s, 1) for s in self.shape]
grids = np.broadcast_arrays(*np.ogrid[slices])
if view is not None:
grids = [g[view] for g in grids]
return grids[self.axis]
@property
def shape(self):
""" Tuple of array dimensions. """
return self._data.shape
@property
def ndim(self):
""" Number of dimensions """
return len(self._data.shape)
def __getitem__(self, key):
return self._calculate(key)
def __lt__(self, other):
if self.world == other.world:
return self.axis < other.axis
return self.world
def __gluestate__(self, context):
return dict(axis=self.axis, world=self.world)
@classmethod
def __setgluestate__(cls, rec, context):
return cls(None, rec['axis'], rec['world'])
@property
def numeric(self):
return True
@property
def categorical(self):
return False
class CategoricalComponent(Component):
"""
Container for categorical data.
"""
def __init__(self, categorical_data, categories=None, jitter=None, units=None):
"""
:param categorical_data: The underlying :class:`numpy.ndarray`
:param categories: List of unique values in the data
        :param jitter: Strategy for jittering the data
        :param units: Optional unit label
"""
super(CategoricalComponent, self).__init__(None, units)
self._categorical_data = np.asarray(categorical_data)
if self._categorical_data.ndim > 1:
raise ValueError("Categorical Data must be 1-dimensional")
# Disable changing of categories
self._categorical_data.setflags(write=False)
self._categories = categories
self._jitter_method = jitter
self._is_jittered = False
self._data = None
if self._categories is None:
self._update_categories()
else:
self._update_data()
@property
def codes(self):
"""
The index of the category for each value in the array.
"""
return self._data
@property
def labels(self):
"""
The original categorical data.
"""
return self._categorical_data
@property
def categories(self):
"""
The categories.
"""
return self._categories
@property
def data(self):
warnings.warn("The 'data' attribute is deprecated. Use 'codes' "
"instead to access the underlying index of the "
"categories")
return self.codes
@property
def numeric(self):
return False
@property
def categorical(self):
return True
def _update_categories(self, categories=None):
"""
:param categories: A sorted array of categories to find in the dataset.
If None the categories are the unique items in the data.
:return: None
"""
if categories is None:
categories, inv = unique(self._categorical_data)
self._categories = categories
self._data = inv.astype(np.float)
self._data.setflags(write=False)
self.jitter(method=self._jitter_method)
else:
if check_sorted(categories):
self._categories = categories
self._update_data()
else:
raise ValueError("Provided categories must be Sorted")
def _update_data(self):
"""
Converts the categorical data into the numeric representations given
self._categories
"""
self._is_jittered = False
self._data = row_lookup(self._categorical_data, self._categories)
self.jitter(method=self._jitter_method)
self._data.setflags(write=False)
def jitter(self, method=None):
"""
Jitter the data so the density of points can be easily seen in a
scatter plot.
:param method: None | 'uniform':
* None: No jittering is done (or any jittering is undone).
            * uniform: A uniformly distributed random variable in (-0.5, 0.5)
is applied to each point.
:return: None
"""
if method not in set(['uniform', None]):
raise ValueError('%s jitter not supported' % method)
self._jitter_method = method
seed = 1234567890
rand_state = np.random.RandomState(seed)
if (self._jitter_method is None) and self._is_jittered:
self._update_data()
        elif (self._jitter_method == 'uniform') and not self._is_jittered:
iswrite = self._data.flags['WRITEABLE']
self._data.setflags(write=True)
self._data += rand_state.uniform(-0.5, 0.5, size=self._data.shape)
self._is_jittered = True
self._data.setflags(write=iswrite)
def subset_from_roi(self, att, roi, other_comp=None, other_att=None,
coord='x', is_nested=False):
"""
Create a SubsetState object from an ROI.
This encapsulates the logic for creating subset states with
        CategoricalComponents. There is an important caveat: only RangeROIs
        and RectangularROIs make sense in mixed-type plots. As such, polygons
        are converted to their outermost edges in this case.
:param att: attribute name of this Component
:param roi: an ROI object
:param other_comp: The other Component for 2D ROIs
:param other_att: The attribute name of the other Component
:param coord: The orientation of this Component
:param is_nested: True if this was passed from another Component.
:return: A SubsetState (or subclass) object
"""
if coord not in ('x', 'y'):
raise ValueError('coord should be one of x/y')
if isinstance(roi, RangeROI):
# The selection is either an x range or a y range
if roi.ori == coord:
# The selection applies to the current component
return CategoricalROISubsetState.from_range(self, att, roi.min, roi.max)
else:
# The selection applies to the other component, so we delegate
other_coord = 'y' if coord == 'x' else 'x'
return other_comp.subset_from_roi(other_att, roi,
other_comp=self,
other_att=att,
coord=other_coord)
elif isinstance(roi, RectangularROI):
# In this specific case, we can decompose the rectangular
# ROI into two RangeROIs that are combined with an 'and'
# logical operation.
other_coord = 'y' if coord == 'x' else 'x'
if coord == 'x':
range1 = XRangeROI(roi.xmin, roi.xmax)
range2 = YRangeROI(roi.ymin, roi.ymax)
else:
range2 = XRangeROI(roi.xmin, roi.xmax)
range1 = YRangeROI(roi.ymin, roi.ymax)
# We get the subset state for the current component
subset1 = self.subset_from_roi(att, range1,
other_comp=other_comp,
other_att=other_att,
coord=coord)
# We now get the subset state for the other component
subset2 = other_comp.subset_from_roi(other_att, range2,
other_comp=self,
other_att=att,
coord=other_coord)
return AndState(subset1, subset2)
elif isinstance(roi, CategoricalROI):
# The selection is categorical itself
return CategoricalROISubsetState(roi=roi, att=att)
else:
# The selection is polygon-like, which requires special care.
if isinstance(other_comp, CategoricalComponent):
# For each category, we check which categories along the other
# axis fall inside the polygon:
selection = {}
for code, label in enumerate(self.categories):
# Determine the coordinates of the points to check
n_other = len(other_comp.categories)
y = np.arange(n_other)
x = np.repeat(code, n_other)
if coord == 'y':
x, y = y, x
# Determine which points are in the polygon, and which
# categories these correspond to
in_poly = roi.contains(x, y)
categories = other_comp.categories[in_poly]
if len(categories) > 0:
selection[label] = set(categories)
return CategoricalROISubsetState2D(selection, att, other_att)
else:
# If the other component is not categorical, we treat this as if
# each categorical component was mapped to a numerical value,
# and at each value, we keep track of the polygon intersection
# with the component. This will result in zero, one, or
# multiple separate numerical ranges for each categorical value.
# TODO: if we ever allow the category order to be changed, we
# need to figure out how to update this!
x, y = roi.to_polygon()
if is_nested:
x, y = y, x
# We loop over each category and for each one we find the
# numerical ranges
selection = {}
for code, label in enumerate(self.categories):
# We determine all the numerical segments that represent the
# ensemble of points in y that fall in the polygon
# TODO: profile the following function
segments = polygon_line_intersections(x, y, xval=code)
if len(segments) > 0:
selection[label] = segments
return CategoricalMultiRangeSubsetState(selection, att, other_att)
def to_series(self, **kwargs):
""" Convert into a pandas.Series object.
        The data will be converted to dtype=np.object.
:param kwargs: All kwargs are passed to the Series constructor.
:return: pandas.Series
"""
return pd.Series(self._categorical_data.ravel(),
dtype=np.object, **kwargs)
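# Illustrative usage sketch (not part of the original module): how a
# CategoricalComponent maps labels to numeric codes and how jitter() perturbs
# those codes for plotting. The labels below are made-up example data.
def _categorical_component_example():  # pragma: no cover
    comp = CategoricalComponent(np.array(['a', 'b', 'a', 'c']))
    print(comp.labels)       # original strings: ['a' 'b' 'a' 'c']
    print(comp.categories)   # unique categories, e.g. ['a' 'b' 'c']
    print(comp.codes)        # index of each label, e.g. [0. 1. 0. 2.]
    comp.jitter(method='uniform')  # adds uniform (-0.5, 0.5) noise to the codes
    comp.jitter(method=None)       # undoes the jitter; codes are exact again
    return comp.codes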
| {
"repo_name": "stscieisenhamer/glue",
"path": "glue/core/component.py",
"copies": "2",
"size": "23534",
"license": "bsd-3-clause",
"hash": -7402596534527506000,
"line_mean": 32.8618705036,
"line_max": 90,
"alpha_frac": 0.5583411235,
"autogenerated": false,
"ratio": 4.666666666666667,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0002825064885107453,
"num_lines": 695
} |
from __future__ import absolute_import, division, print_function
import logging
import os
import re
import requests
import socket
from subprocess import STDOUT
try:
from subprocess import SubprocessError
except ImportError: # pragma: no cover
# py2
from subprocess import CalledProcessError as SubprocessError
import time
import warnings
from .utils import shell_out, get_log_content
from .exceptions import YARNException
logger = logging.getLogger(__name__)
class YARNAPI(object):
"""REST interface to YARN
self.auth holds the authentication being used - it can be updated as
needed, and will then be applied to subsequent actions.
Parameters
----------
rm: str
Resource Manager host
rm_port: int
HTTP REST port
scheme: 'http' or 'https'
gateway_path: str
If routing through a proxy, this is prepended to the URL path
kerberos: bool
Whether to use kerberos authentication when speaking to the REST
service. Requires requests_kerberos, see
https://github.com/requests/requests-kerberos
username, password: str
For simple authentication of the REST endpoint.
"""
timeout = 2 # for REST HTTP calls
def __init__(self, rm, rm_port, scheme='http', gateway_path='',
kerberos=False, username=None, password=None):
self.rm = rm
self.rm_port = rm_port
self.scheme = scheme
self.gateway_path = gateway_path
self.host_port = "{0}:{1}".format(self.rm, self.rm_port)
self.url = scheme + '://' + self.host_port + gateway_path + '/ws/v1/'
if kerberos:
from requests_kerberos import HTTPKerberosAuth
self.auth = HTTPKerberosAuth()
elif username and password:
self.auth = (username, password)
else:
self.auth = None
self.username = username
self.password = password
@property
def apps(self):
"""App IDs known to YARN"""
data = self.apps_info()
apps = [d['id'] for d in data]
return apps
def apps_info(self, app_id=None):
"""List app apps, or info for given app"""
if app_id is None:
# this query allows for filtering on a number of parameters
url = self.url + 'cluster/apps/'
logger.debug("Getting Resource Manager Info: {0}".format(url))
r = requests.get(url, timeout=self.timeout, auth=self.auth)
self._verify_response(r)
data = r.json()
return (data.get('apps', None) or {'app': []})['app']
else:
r = requests.get(self.url + 'cluster/apps/{}'.format(app_id),
timeout=self.timeout, auth=self.auth)
self._verify_response(r)
return r.json()['app']
def app_attempts(self, app_id):
"""List of attempt details for given app"""
r = requests.get(self.url + 'cluster/apps/{}/appattempts'.format(
app_id), timeout=self.timeout, auth=self.auth)
self._verify_response(r)
        return r.json().get('appAttempts', {'appAttempt': []})['appAttempt']
def app_containers(self, app_id=None, info=None):
"""
Get list of container information for given app. If given app_id,
will automatically get its info, or can skip by providing the info
directly.
Parameters
----------
app_id: str
YARN ID for the app
info: dict
Produced by app_info()
Returns
-------
List of container info dictionaries
"""
if (app_id is None) == (info is None):
            raise TypeError('Must provide exactly one of app_id or info')
if app_id:
info = self.status(app_id)
amHostHttpAddress = info['amHostHttpAddress']
url = "http://{0}/ws/v1/node/containers".format(
amHostHttpAddress)
r = requests.get(url, timeout=self.timeout, auth=self.auth)
self._verify_response(r)
data = r.json()['containers']
if not data:
raise YARNException("No containers available")
container = data['container']
logger.debug(container)
# container_1452274436693_0001_01_000001
def get_app_id_num(x):
return "_".join(x.split("_")[1:3])
app_id_num = get_app_id_num(app_id)
containers = [d for d in container
if get_app_id_num(d['id']) == app_id_num]
return containers
def logs(self, app_id, shell=False, retries=4, delay=3):
"""
Collect logs from RM (if running)
With shell=True, collect logs from HDFS after job completion
Parameters
----------
app_id: str
A yarn application ID string
shell: bool
Shell out to yarn CLI (default False)
retries: int
If CLI is not returning info, retry this many times
delay: number
Seconds to wait between retries
Returns
-------
log: dictionary
logs from each container (when possible)
"""
running = self.state(app_id) == 'RUNNING'
if not shell and running:
# logs are held in memory only while app is running
try:
containers = self.app_containers(app_id)
logs = {}
for c in containers:
log = dict(nodeId=c['nodeId'])
# grab stdout
url = "{0}/stdout/?start=0".format(c['containerLogsLink'])
logger.debug("Gather stdout/stderr data from {0}:"
" {1}".format(c['nodeId'], url))
r = requests.get(url, timeout=self.timeout, auth=self.auth)
log['stdout'] = get_log_content(r.text)
# grab stderr
url = "{0}/stderr/?start=0".format(c['containerLogsLink'])
r = requests.get(url, timeout=self.timeout, auth=self.auth)
log['stderr'] = get_log_content(r.text)
logs[c['id']] = log
return logs
except Exception:
logger.warning("Error while attempting to fetch logs,"
" using fallback", exc_info=1)
# fallback
# TODO: this is just a location in HDFS given by app info
cmd = ["yarn", "logs", "-applicationId", app_id]
while True:
try:
out = shell_out(cmd)
break
except SubprocessError: # pragma: no cover
retries -= 1
if retries < 0:
raise RuntimeError('Retries exceeded when fetching logs for'
' ' + app_id)
time.sleep(delay)
logs = {}
container = None
ltype = 'stdout'
started = False
for line in out.split('\n'):
p = re.compile('Container: ([a-zA-Z0-9_]+) on ([a-zA-Z0-9_]+)')
r = p.match(line)
if r:
container, nodeID = r.groups()
logs[container] = dict(nodeId=nodeID, stdout='', stderr='')
started = False
elif line == 'LogType:stderr':
ltype = 'stderr'
elif line == 'LogType:stdout':
ltype = 'stdout'
elif line == "Log Contents:":
started = True
elif started:
logs[container][ltype] = logs[container][ltype] + '\n' + line
return logs
def container_status(self, container_id):
"""Ask the YARN shell about the given container
Better to use app_containers, assuming you know the app_id, which
is normally "_".join(container_id.split("_")[:3])
"""
cmd = ["yarn", "container", "-status", container_id]
return str(shell_out(cmd))
def state(self, app_id):
"""Current state of given application"""
r = requests.get(self.url + 'cluster/apps/{}/state'.format(app_id),
timeout=self.timeout, auth=self.auth)
self._verify_response(r)
return r.json()['state']
def status(self, app_id):
""" Get status of an application
Parameters
----------
app_id: str
A yarn application ID string
Returns
-------
dictionary: status of application
"""
return self.apps_info(app_id)
def kill_all(self, knit_only=True):
"""Kill a set of applications
Parameters
----------
knit_only: bool (True)
Only kill apps with the name 'knit' (i.e., ones we started)
"""
for app in self.apps:
stat = self.apps_info(app)
if knit_only and stat['name'] != 'knit':
continue
if stat['state'] in ['KILLED', 'FINISHED', 'FAILED']:
continue
self.kill(app)
def kill(self, app_id):
"""
Method to kill a yarn application
Parameters
----------
app_id: str
YARN application id
Returns
-------
bool:
True if successful, False otherwise.
"""
cmd = ["yarn", "application", "-kill", app_id]
try:
out = shell_out(cmd, stderr=STDOUT)
return "Killed application" in out
except SubprocessError:
return False
def _verify_response(self, r):
if not r.ok:
try:
ex = r.json()['RemoteException']
raise YARNException(ex.get('message', str(ex)))
except (ValueError, IndexError):
raise YARNException(r.text)
def cluster_info(self):
"""YARN cluster information: driver, version..."""
r = requests.get(self.url + 'cluster', timeout=self.timeout,
auth=self.auth)
self._verify_response(r)
return r.json()['clusterInfo']
def cluster_metrics(self):
"""YARN cluster global capacity/allocations"""
r = requests.get(self.url + 'cluster/metrics', timeout=self.timeout,
auth=self.auth)
self._verify_response(r)
return r.json()['clusterMetrics']
def scheduler(self):
"""State of the scheduler/queue"""
r = requests.get(self.url + 'cluster/scheduler', timeout=self.timeout,
auth=self.auth)
self._verify_response(r)
return r.json()['scheduler']
def app_stats(self):
"""Number of apps of various states"""
r = requests.get(self.url + 'cluster/appstatistics',
timeout=self.timeout, auth=self.auth)
self._verify_response(r)
return r.json()['appStatInfo']
def nodes(self):
"""Info on YARN's worker nodes"""
r = requests.get(self.url + 'cluster/nodes', timeout=self.timeout,
auth=self.auth)
self._verify_response(r)
return r.json()['nodes']['node']
def system_logs(self):
"""Trouble-shooting method
This will check for RM and NM processes on this machine, and return
their logs, if possible.
"""
nodes = self.nodes()
ips = set((socket.gethostbyname_ex(socket.gethostname())[2] +
socket.gethostbyname_ex('localhost')[2]))
on_rm = socket.gethostbyname(self.rm) in ips
single_node = on_rm and len(nodes) == 1 and (
socket.gethostbyname(nodes[0]['nodeHostName']) in ips)
out = {'single_node': single_node, 'on_rm': on_rm}
try:
import psutil
for p in psutil.process_iter():
if any('NodeManager' in s for s in p.cmdline()):
out['nm_proc'] = p
log = os.path.join([s.split('=')[1] for s in p.cmdline()
if s.startswith('-Dyarn.log.dir')][0],
[s.split('=')[1] for s in p.cmdline()
if s.startswith('-Dyarn.log.file')][0])
out['nm_logfile'] = log
if any('ResourceManager' in s for s in p.cmdline()):
out['rm_proc'] = p
log = os.path.join([s.split('=')[1] for s in p.cmdline()
if s.startswith('-Dyarn.log.dir')][0],
[s.split('=')[1] for s in p.cmdline()
if s.startswith('-Dyarn.log.file')][0])
out['rm_logfile'] = log
except ImportError:
warnings.warn('psutil is not installed')
return out
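# Illustrative usage sketch (not part of the original module): basic YARNAPI
# queries. The Resource Manager host and port below are placeholders.
def _yarnapi_example():  # pragma: no cover
    yarn = YARNAPI(rm='resourcemanager.example.com', rm_port=8088)
    print(yarn.cluster_metrics())          # global capacity/allocation figures
    for app_id in yarn.apps:               # application IDs known to YARN
        print(app_id, yarn.state(app_id))  # e.g. RUNNING, FINISHED, KILLED
    return yarn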
| {
"repo_name": "blaze/knit",
"path": "knit/yarn_api.py",
"copies": "2",
"size": "12916",
"license": "bsd-3-clause",
"hash": -7954509776072803000,
"line_mean": 34.2896174863,
"line_max": 80,
"alpha_frac": 0.5252400124,
"autogenerated": false,
"ratio": 4.278237827095064,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5803477839495065,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import logging
import os
import sys
from conda_build.conda_interface import ArgumentParser
from conda_build import api
from conda_build.index import MAX_THREADS_DEFAULT
from conda_build.utils import DEFAULT_SUBDIRS
logging.basicConfig(level=logging.INFO)
def parse_args(args):
p = ArgumentParser(
description="Update package index metadata files in given directories.")
p.add_argument(
'dir',
help='Directory that contains an index to be updated.',
nargs='*',
default=[os.getcwd()],
)
p.add_argument(
'-c', "--check-md5",
action="store_true",
help="""Use hash values instead of file modification times for determining if a
package's metadata needs to be updated.""",
)
p.add_argument(
"-n", "--channel-name",
help="Customize the channel name listed in each channel's index.html.",
)
p.add_argument(
'-s', '--subdir',
action='append',
help='Optional. The subdir to index. Can be given multiple times. If not provided, will '
'default to all of %s. If provided, will not create channeldata.json for the channel.'
'' % ', '.join(DEFAULT_SUBDIRS),
)
p.add_argument(
'-t', '--threads',
default=MAX_THREADS_DEFAULT,
type=int,
)
p.add_argument(
"-p", "--patch-generator",
help="Path to Python file that outputs metadata patch instructions"
)
p.add_argument(
"--hotfix-source-repo",
help="URL of git repo that hosts your metadata patch instructions"
)
p.add_argument(
"--verbose", help="show extra debugging info", action="store_true"
)
p.add_argument(
"--no-progress", help="Hide progress bars", action="store_false", dest="progress"
)
p.add_argument(
"--current-index-versions-file", "-m",
help="""
YAML file containing name of package as key, and list of versions as values. The current_index.json
will contain the newest from this series of versions. For example:
python:
- 2.7
- 3.6
will keep python 2.7.X and 3.6.Y in the current_index.json, instead of only the very latest python version.
"""
)
args = p.parse_args(args)
return p, args
def execute(args):
_, args = parse_args(args)
api.update_index(args.dir, check_md5=args.check_md5, channel_name=args.channel_name,
threads=args.threads, subdir=args.subdir, patch_generator=args.patch_generator,
verbose=args.verbose, progress=args.progress, hotfix_source_repo=args.hotfix_source_repo,
current_index_versions=args.current_index_versions_file)
def main():
return execute(sys.argv[1:])
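# Illustrative usage sketch (not part of the original module): calling the
# indexer programmatically. The channel directory and name are placeholders;
# this is roughly what the `conda index` CLI does with the same arguments.
def _example_invocation():  # pragma: no cover
    execute(['--check-md5', '--channel-name', 'my-channel', '/tmp/my-channel'])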
| {
"repo_name": "pelson/conda-build",
"path": "conda_build/cli/main_index.py",
"copies": "2",
"size": "2870",
"license": "bsd-3-clause",
"hash": 8094977412411163000,
"line_mean": 30.5384615385,
"line_max": 115,
"alpha_frac": 0.6229965157,
"autogenerated": false,
"ratio": 3.953168044077135,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.001131488613909042,
"num_lines": 91
} |
from __future__ import absolute_import, division, print_function
import logging
import os
import tensorflow as tf
from tensorflow.python.saved_model import builder as saved_model_builder
from tensorflow.python.saved_model import (signature_constants, tag_constants)
def get_optimizer_by_name(optimizer_name, learning_rate):
"""
Get optimizer object by the optimizer name.
Args:
optimizer_name: Name of the optimizer.
learning_rate: The learning rate.
Return:
The optimizer object.
"""
logging.info("Use the optimizer: {}".format(optimizer_name))
if optimizer_name == "sgd":
optimizer = tf.train.GradientDescentOptimizer(learning_rate)
elif optimizer_name == "adadelta":
optimizer = tf.train.AdadeltaOptimizer(learning_rate)
elif optimizer_name == "adagrad":
optimizer = tf.train.AdagradOptimizer(learning_rate)
elif optimizer_name == "adam":
optimizer = tf.train.AdamOptimizer(learning_rate)
elif optimizer_name == "ftrl":
optimizer = tf.train.FtrlOptimizer(learning_rate)
elif optimizer_name == "rmsprop":
optimizer = tf.train.RMSPropOptimizer(learning_rate)
else:
optimizer = tf.train.GradientDescentOptimizer(learning_rate)
return optimizer
def save_model(model_path,
model_version,
sess,
signature_def_map,
is_save_graph=False):
"""
Save the model in standard SavedModel format.
Args:
model_path: The path to model.
model_version: The version of model.
sess: The TensorFlow Session object.
signature_def_map: The map of TensorFlow SignatureDef object.
is_save_graph: Should save graph file of not.
Return:
None
"""
export_path = os.path.join(model_path, str(model_version))
  if os.path.isdir(export_path):
logging.error("The model exists in path: {}".format(export_path))
return
try:
# Save the SavedModel
legacy_init_op = tf.group(tf.tables_initializer(), name='legacy_init_op')
builder = saved_model_builder.SavedModelBuilder(export_path)
builder.add_meta_graph_and_variables(
sess, [tag_constants.SERVING],
clear_devices=True,
signature_def_map=signature_def_map,
legacy_init_op=legacy_init_op)
logging.info("Save the model in: {}".format(export_path))
builder.save()
# Save the GraphDef
    if is_save_graph:
graph_file_name = "graph.pb"
logging.info("Save the graph file in: {}".format(model_path))
tf.train.write_graph(
sess.graph_def, model_path, graph_file_name, as_text=False)
except Exception as e:
logging.error("Fail to export saved model, exception: {}".format(e))
def restore_from_checkpoint(sess, saver, checkpoint_file_path):
"""
Restore session from checkpoint files.
Args:
sess: TensorFlow Session object.
saver: TensorFlow Saver object.
checkpoint_file_path: The checkpoint file path.
Return:
True if restore successfully and False if fail
"""
if checkpoint_file_path:
logging.info(
"Restore session from checkpoint: {}".format(checkpoint_file_path))
saver.restore(sess, checkpoint_file_path)
return True
else:
logging.error("Checkpoint not found: {}".format(checkpoint_file_path))
return False
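# Illustrative usage sketch (not part of the original module): wiring the helpers
# above together in a TF 1.x graph/session. The checkpoint directory, model path
# and the trivial variable below are made-up placeholders.
def _example_usage():  # pragma: no cover
  optimizer = get_optimizer_by_name("adam", learning_rate=0.001)
  weights = tf.Variable(tf.zeros([10, 1]), name="weights")
  saver = tf.train.Saver()
  with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    # Restores only if a checkpoint exists under ./checkpoint (returns False otherwise).
    restore_from_checkpoint(sess, saver, tf.train.latest_checkpoint("./checkpoint"))
    # A real signature_def_map would be built with
    # tf.saved_model.signature_def_utils.build_signature_def(...).
    save_model("./model", 1, sess, signature_def_map={})
  return optimizer, weights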
| {
"repo_name": "tobegit3hub/deep_recommend_system",
"path": "util.py",
"copies": "1",
"size": "3354",
"license": "apache-2.0",
"hash": 8266081195671842000,
"line_mean": 30.641509434,
"line_max": 78,
"alpha_frac": 0.675313059,
"autogenerated": false,
"ratio": 3.9182242990654204,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5093537358065421,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import logging
import os
import time
import uuid
from os import environ as env
from threading import Thread
from time import sleep
from mentos.exceptions import ExecutorException
from mentos.subscription import Event, Subscription
from mentos.utils import decode_data, encode_data, parse_duration
from toolz import merge
from tornado.ioloop import IOLoop
log = logging.getLogger(__name__)
class ExecutorDriver():
def __init__(self, executor, handlers={}, loop=None):
self.loop = loop or IOLoop()
self.master = env.get('MESOS_AGENT_ENDPOINT')
self.framework_id = dict(value=env.get('MESOS_FRAMEWORK_ID'))
self.executor_id = dict(value=env.get('MESOS_EXECUTOR_ID'))
self.framework = {'id': self.framework_id,
'framework_id': self.framework_id,
'executor_id': self.executor_id}
grace_shutdown_period = env.get('MESOS_EXECUTOR_SHUTDOWN_GRACE_PERIOD')
if grace_shutdown_period: # pragma: no cover
self.grace_shutdown_period = parse_duration(grace_shutdown_period)
else:
self.grace_shutdown_period = 0.0
self.checkpoint = bool(env.get('MESOS_CHECKPOINT'))
self.local = bool(env.get('MESOS_LOCAL', True))
self.executor = executor
self.framework_info = None
self.executor_info = None
self.executor = executor
defaults = {Event.SUBSCRIBED: self.on_subscribed,
Event.CLOSE: self.on_close,
Event.MESSAGE: self.on_message,
Event.ERROR: self.on_error,
Event.ACKNOWLEDGED: self.on_acknowledged,
Event.KILL: self.on_kill,
Event.LAUNCH_GROUP: self.on_launch_group,
Event.LAUNCH: self.on_launch,
Event.SHUTDOWN: self.on_shutdown,
Event.OUTBOUND_SUCCESS: self.on_outbound_success,
Event.OUTBOUND_ERROR: self.on_outbound_error}
self.handlers = merge(defaults, handlers)
self.subscription = Subscription(self.framework, self.master,
'/api/v1/executor', self.handlers,
loop=self.loop)
self.subscription.tasks = {}
self.subscription.updates = {}
def start(self, block=False, **kwargs):
'''Start executor running in separate thread'''
if not self.loop._running:
self._loop_thread = Thread(target=self.loop.start)
self._loop_thread.daemon = True
self._loop_thread.start()
while not self.loop._running: # pragma: no cover
sleep(0.001)
self.loop.add_callback(self.subscription.start)
if block: # pragma: no cover
self._loop_thread.join()
def stop(self):
log.debug('Terminating Scheduler Driver')
self.subscription.close()
self.loop.add_callback(self.loop.stop)
while self.loop._running: # pragma: no cover
sleep(0.1)
def update(self, status):
if 'timestamp' not in status:
status['timestamp'] = int(time.time())
if 'uuid' not in status:
status['uuid'] = encode_data(uuid.uuid4().bytes)
if 'source' not in status:
status['source'] = 'SOURCE_EXECUTOR'
payload = {
'type': 'UPDATE',
'framework_id': self.framework_id,
'executor_id': self.executor_id,
'update': {
'status': status
}
}
self.loop.add_callback(self.subscription.send, payload)
logging.debug('Executor sends status update {} for task {}'.format(
status['state'], status['task_id']))
def message(self, message):
payload = {
'type': 'MESSAGE',
'framework_id': self.framework_id,
'executor_id': self.executor_id,
'message': {
'data': encode_data(message)
}
}
self.loop.add_callback(self.subscription.send, payload)
logging.debug('Driver sends framework message {}'.format(message))
def on_subscribed(self, info):
executor_info = info['executor_info']
framework_info = info['framework_info']
agent_info = info['agent_info']
if executor_info['executor_id'] != self.executor_id: # pragma: no cover
raise ExecutorException('Mismatched executor_id\'s')
if framework_info['id'] != self.framework_id: # pragma: no cover
raise ExecutorException('Mismatched framework_ids')
if self.executor_info is None or self.framework_info is None:
self.executor_info = executor_info
self.framework_info = framework_info
self.executor.on_registered(
self, executor_info,
self.framework_info, agent_info
)
else: # pragma: no cover
self.executor.on_reregistered(self, agent_info)
log.debug('Subscribed with info {}'.format(info))
def on_close(self):
if not self.checkpoint:
if not self.local: # pragma: no cover
self._delay_kill()
self.executor.on_shutdown(self)
log.debug('Got close command')
def on_launch_group(self, event):
task_info = event['task']
task_id = task_info['task_id']['value']
if task_id in self.subscription.tasks: # pragma: no cover
raise ExecutorException('Task Exists')
self.subscription.tasks[task_id] = task_info
self.executor.on_launch(self, task_info)
log.debug('Got launch group command {}'.format(event))
def on_launch(self, event):
task_info = event['task']
task_id = task_info['task_id']['value']
if task_id in self.subscription.tasks:
raise ExecutorException('Task Exists')
self.subscription.tasks[task_id] = task_info
log.debug('Launching {}'.format(event))
self.executor.on_launch(self, task_info)
log.debug('Got launch command {}'.format(event))
def on_kill(self, event):
task_id = event['task_id']
self.executor.on_kill(self, task_id)
log.debug('Got kill command {}'.format(event))
def on_acknowledged(self, event):
task_id = event['task_id']['value']
uuid_ = uuid.UUID(bytes=decode_data(event['uuid']))
self.subscription.updates.pop(uuid_, None)
self.subscription.tasks.pop(task_id, None)
self.executor.on_acknowledged(self, task_id, uuid_)
log.debug('Got acknowledge {}'.format(event))
def on_message(self, event):
data = event['data']
self.executor.on_message(self, data)
def on_error(self, event):
message = event['message']
self.executor.on_error(self, message)
log.debug('Got error {}'.format(event))
def on_shutdown(self):
if not self.local: # pragma: no cover
self._delay_kill()
self.executor.on_shutdown(self)
log.debug('Got Shutdown command')
self.stop()
def on_outbound_success(self, event):
self.executor.on_outbound_success(self, event['request'])
log.debug('Got success on outbound {}'.format(event))
def on_outbound_error(self, event):
self.executor.on_outbound_error(
self, event['request'], event['endpoint'], event['error'])
log.debug('Got error on outbound {}'.format(event))
def _delay_kill(self): # pragma: no cover
def _():
try:
time.sleep(self.grace_shutdown_period)
os._exit(os.EX_OK)
except Exception:
log.exception('Failed to force kill executor')
t = Thread(target=_)
t.daemon = True
t.start()
def __str__(self):
return '<{}: executor={}:{}:{}>'.format(
self.__class__.__name__, self.master,
self.subscription.master_info.info, self.framework)
__repr__ = __str__
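# Illustrative sketch (not part of the original module): a minimal executor object
# exposing the callbacks ExecutorDriver invokes. The bodies are placeholders; a
# real executor would run the task's work inside on_launch.
class _MinimalExecutor(object):  # pragma: no cover
    def on_registered(self, driver, executor_info, framework_info, agent_info):
        log.info('Registered executor on agent %s', agent_info)
    def on_launch(self, driver, task_info):
        # Immediately report the task as finished; real work would happen here.
        driver.update(dict(task_id=task_info['task_id'], state='TASK_FINISHED'))
    def on_kill(self, driver, task_id):
        driver.update(dict(task_id=task_id, state='TASK_KILLED'))
    def on_acknowledged(self, driver, task_id, uuid_):
        pass
    def on_message(self, driver, data):
        log.info('Framework message: %r', data)
    def on_error(self, driver, message):
        log.error('Executor error: %s', message)
    def on_shutdown(self, driver):
        pass
    def on_outbound_success(self, driver, request):
        pass
    def on_outbound_error(self, driver, request, endpoint, error):
        pass
# The driver would then be run (inside a Mesos container) with:
#     ExecutorDriver(_MinimalExecutor()).start(block=True)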
| {
"repo_name": "Arttii/malefico",
"path": "mentos/executor.py",
"copies": "2",
"size": "8173",
"license": "apache-2.0",
"hash": 8053147359128773000,
"line_mean": 35.3244444444,
"line_max": 80,
"alpha_frac": 0.581793711,
"autogenerated": false,
"ratio": 4.008337420304071,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5590131131304071,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import logging
import os
import pytest
import py.path
logger = logging.getLogger(__name__)
@pytest.fixture(scope='session', autouse=True)
def aws_credentials():
# Handle change in https://github.com/spulec/moto/issues/1924
# Ensure AWS SDK finds some (bogus) credentials in the environment and
# doesn't try to use other providers.
overrides = {
'AWS_ACCESS_KEY_ID': 'testing',
'AWS_SECRET_ACCESS_KEY': 'testing',
'AWS_DEFAULT_REGION': 'us-east-1'
}
saved_env = {}
for key, value in overrides.items():
logger.info('Overriding env var: {}={}'.format(key, value))
saved_env[key] = os.environ.get(key, None)
os.environ[key] = value
yield
for key, value in saved_env.items():
logger.info('Restoring saved env var: {}={}'.format(key, value))
if value is None:
del os.environ[key]
else:
os.environ[key] = value
saved_env.clear()
@pytest.fixture(scope="package")
def stacker_fixture_dir():
path = os.path.join(os.path.dirname(os.path.realpath(__file__)),
'fixtures')
return py.path.local(path)
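# Illustrative sketch (not part of the original test package): how a test would
# consume these fixtures. A real test like this would normally live in a test
# module rather than in conftest.py; it is shown here only for demonstration.
def _example_test(stacker_fixture_dir):  # pragma: no cover
    # aws_credentials is session-scoped and autouse, so the bogus AWS environment
    # variables are already set for every test, including this one.
    assert os.environ['AWS_DEFAULT_REGION'] == 'us-east-1'
    assert stacker_fixture_dir.check(dir=1)  # the fixtures directory exists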
| {
"repo_name": "remind101/stacker",
"path": "stacker/tests/conftest.py",
"copies": "1",
"size": "1230",
"license": "bsd-2-clause",
"hash": -6445209468204074000,
"line_mean": 26.9545454545,
"line_max": 74,
"alpha_frac": 0.6203252033,
"autogenerated": false,
"ratio": 3.6070381231671553,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9727363326467156,
"avg_score": 0,
"num_lines": 44
} |
from __future__ import absolute_import, division, print_function
import logging
import pywt
import numpy as np
from TotalActivation.filters.filter_boundary import filter_boundary_normal, filter_boundary_transpose
# from TotalActivation.process.utils import mad
logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.DEBUG)
def mad(X, axis=0):
"""
Median absolute deviation
:param X: Input matrix
    :param axis: Axis to calculate quantity (default = 0)
:return: MAD for X along axis
"""
return np.median(np.abs(X - np.median(X, axis=axis)), axis=axis)
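# Illustrative sketch (not part of the original module): a quick numeric check of
# mad() on a made-up 1-D vector.
def _mad_example():  # pragma: no cover
    x = np.array([1.0, 1.0, 2.0, 2.0, 4.0, 6.0, 9.0])
    # The median is 2, the absolute deviations are [1, 1, 0, 0, 2, 4, 7],
    # and their median is 1.
    return mad(x)  # -> 1.0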
def wiener(X, hrfparam, Lambda, n_vox, n_tp):
"""
Perform Wiener-based temporal deconvolution.
:param X: time x voxels matrix
:param hrfparam: HRF parameters
    :param Lambda: The regularization parameter
    :param n_vox: number of voxels
    :param n_tp: number of time points
:return: Deconvolved time series
"""
f_num = np.abs(np.fft.fft(hrfparam['num'], n_tp) ** 2)
f_den = np.abs(np.fft.fft(hrfparam['den'][0], n_tp) * \
np.fft.fft(hrfparam['den'][1], n_tp) * \
hrfparam['den'][-1] * \
np.exp(np.arange(1, n_tp + 1) * (hrfparam['den'][1].shape[0] - 1) / n_tp)) ** 2
_, coef = pywt.wavedec(X, 'db3', level=1, axis=0)
lambda_temp = mad(coef) * Lambda ** 2 * n_tp
res = np.real(np.fft.ifft(np.fft.fft(X) * (np.repeat(f_den, n_vox).reshape(n_tp, n_vox) / (
np.repeat(f_den, n_vox).reshape(n_tp, n_vox) + np.kron(f_num, lambda_temp).reshape(n_tp, n_vox))), axis=1))
return res
# TODO this function needs love
def temporal_TA(X, f_analyze, max_eig, n_tp, Nit, noise_estimate_fin, l, cost_save, voxels=None):
if voxels is None:
_, coef = pywt.wavedec(X, 'db3', level=1, axis=0)
lambda_temp = mad(coef) * l
if noise_estimate_fin is not None:
lambdas_temp_fin = np.atleast_1d(noise_estimate_fin).copy()
else:
lambdas_temp_fin = np.atleast_1d(lambda_temp).copy()
if cost_save is not False:
cost_temp = np.zeros((Nit, 1))
else:
cost_temp = None
else:
X = X[:, voxels]
_, coef = pywt.wavedec(X, 'db3', level=1, axis=0)
lambda_temp = mad(coef) * l
if noise_estimate_fin is not None:
lambdas_temp_fin = np.atleast_1d(noise_estimate_fin[voxels]).copy()
else:
lambdas_temp_fin = np.atleast_1d(lambda_temp).copy()
if cost_save is not False:
cost_temp = np.zeros((Nit, 1))
else:
cost_temp = None
noise_estimate = np.atleast_1d(lambda_temp).copy()
noise_estimate = np.minimum(noise_estimate, 0.95)
precision = noise_estimate / 100000.0
z = np.zeros_like(X)
k = 0
t = 1
s = np.zeros_like(X)
while k < Nit:
z_l = z.copy()
z0 = filter_boundary_normal(f_analyze, X)
z1 = 1.0 / (lambdas_temp_fin * max_eig) * z0
z2 = filter_boundary_transpose(f_analyze, s)
z3 = filter_boundary_normal(f_analyze, z2)
z4 = z1 + s
z = z4 - z3 / max_eig
z = np.maximum(np.minimum(z, 1), -1)
t_l = t
t = (1 + np.sqrt(1.0 + 4.0 * (np.power(t, 2)))) / 2.0
s = z + (t_l - 1.0) / t * (z - z_l)
if cost_save is not None:
temp = X - lambdas_temp_fin * filter_boundary_transpose(f_analyze, z)
cost_temp = np.sum(np.power(temp - X, 2), axis=0) / 2.0 + lambdas_temp_fin * np.sum(
np.abs(filter_boundary_normal(f_analyze, temp)), axis=0)
noise_estimate_fin = np.sqrt(np.sum(np.power(temp - X, 2.0), axis=0) / n_tp)
else:
nv_tmp1 = filter_boundary_transpose(f_analyze, z)
nv_tmp2 = lambdas_temp_fin * nv_tmp1
noise_estimate_fin = np.sqrt(np.sum(np.power(nv_tmp2, 2.0), axis=0) / n_tp)
if np.any(np.abs(noise_estimate_fin - noise_estimate) > precision):
gp = np.where(np.abs(noise_estimate_fin - noise_estimate) > precision)[0]
lambdas_temp_fin[gp] = lambdas_temp_fin[gp] * noise_estimate[gp] / noise_estimate_fin[gp]
k += 1
Y = X - lambdas_temp_fin * filter_boundary_transpose(f_analyze, z)
return Y # , noise_estimate_fin, lambdas_temp_fin, cost_temp
| {
"repo_name": "mfalkiewicz/pyTotalActivation",
"path": "TotalActivation/process/temporal.py",
"copies": "2",
"size": "4292",
"license": "mit",
"hash": -7822646072191780000,
"line_mean": 33.6129032258,
"line_max": 115,
"alpha_frac": 0.5775862069,
"autogenerated": false,
"ratio": 2.9157608695652173,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.44933470764652167,
"avg_score": null,
"num_lines": null
} |