text | meta
---|---
stringlengths 0–1.05M | dict
from __future__ import absolute_import, division, print_function
import platform
import datetime
__all__ = ['get_sys_dict', 'system_info']
def get_sys_dict():
"""
Test which packages are installed on the system.
Returns
-------
sys_prop : `dict`
A dictionary containing the programs and versions installed on this
machine
"""
try:
from sunpy.version import version as sunpy_version
from sunpy.version import githash as sunpy_git_description
except ImportError:
sunpy_version = 'Missing version.py; re-run setup.py'
sunpy_git_description = 'N/A'
# Dependencies
try:
from numpy import __version__ as numpy_version
except ImportError:
numpy_version = "NOT INSTALLED"
try:
from scipy import __version__ as scipy_version
except ImportError:
scipy_version = "NOT INSTALLED"
try:
from matplotlib import __version__ as matplotlib_version
except ImportError:
matplotlib_version = "NOT INSTALLED"
try:
from astropy import __version__ as astropy_version
except ImportError:
astropy_version = "NOT INSTALLED"
try:
from pandas import __version__ as pandas_version
except ImportError:
pandas_version = "NOT INSTALLED"
try:
from bs4 import __version__ as bs4_version
except ImportError:
bs4_version = "NOT INSTALLED"
try:
from PyQt4.QtCore import PYQT_VERSION_STR as pyqt_version
except ImportError:
pyqt_version = "NOT INSTALLED"
try:
from suds import __version__ as suds_version
except ImportError:
suds_version = "NOT INSTALLED"
try:
from sqlalchemy import __version__ as sqlalchemy_version
except ImportError:
sqlalchemy_version = "NOT INSTALLED"
try:
from requests import __version__ as requests_version
except ImportError:
requests_version = "NOT INSTALLED"
sys_prop = {'Time':datetime.datetime.utcnow().strftime("%A, %d. %B %Y %I:%M%p UT"),
'System':platform.system(), 'Processor':platform.processor(),
'SunPy':sunpy_version, 'SunPy_git':sunpy_git_description,
'Arch':platform.architecture()[0], "Python":platform.python_version(),
'NumPy':numpy_version,
'SciPy':scipy_version, 'matplotlib':matplotlib_version,
'Astropy':astropy_version, 'Pandas':pandas_version,
'beautifulsoup':bs4_version, 'PyQt':pyqt_version,
'SUDS':suds_version, 'Sqlalchemy':sqlalchemy_version, 'Requests':requests_version
}
return sys_prop
def system_info():
"""
Takes the dictionary from get_sys_dict() and prints the contents in an attractive fashion.
"""
sys_prop = get_sys_dict()
# title
print("==========================================================")
print(" SunPy Installation Information\n")
print("==========================================================\n")
# general properties
print("###########")
print(" General")
print("###########")
# OS and architecture information
for sys_info in ['Time', 'System', 'Processor', 'Arch', 'SunPy', 'SunPy_git']:
print('{0} : {1}'.format(sys_info, sys_prop[sys_info]))
if sys_prop['System'] == "Linux":
distro = " ".join(platform.linux_distribution())
print("OS: {0} (Linux {1} {2})".format(distro, platform.release(), sys_prop['Processor']))
elif sys_prop['System'] == "Darwin":
print("OS: Mac OS X {0} ({1})".format(platform.mac_ver()[0], sys_prop['Processor']))
elif sys_prop['System'] == "Windows":
print("OS: Windows {0} {1} ({2})".format(platform.release(),
platform.version(), sys_prop['Processor']))
else:
print("Unknown OS ({0})".format(sys_prop['Processor']))
print("\n")
# required libraries
print("###########")
print(" Required Libraries ")
print("###########")
for sys_info in ['Python', 'NumPy', 'SciPy',
'matplotlib', 'Astropy', 'Pandas']:
print('{0}: {1}'.format(sys_info, sys_prop[sys_info]))
print("\n")
# recommended
print("###########")
print(" Recommended Libraries ")
print("###########")
for sys_info in ['beautifulsoup', 'PyQt', 'SUDS',
'Sqlalchemy', 'Requests']:
print('{0}: {1}'.format(sys_info, sys_prop[sys_info]))
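# A minimal usage sketch (not part of the original module), assuming the module
# is run directly: get_sys_dict() returns the property mapping and system_info()
# prints the formatted report. Which optional entries read "NOT INSTALLED"
# depends on the local environment.
if __name__ == "__main__":
    props = get_sys_dict()
    # Individual entries can be inspected directly, e.g. the Python and NumPy versions.
    print(props['Python'], props['NumPy'])
    # Or print the full formatted report.
    system_info()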
| {
"repo_name": "Alex-Ian-Hamilton/sunpy",
"path": "sunpy/util/sysinfo.py",
"copies": "1",
"size": "4513",
"license": "bsd-2-clause",
"hash": 7959638460580176000,
"line_mean": 29.7006802721,
"line_max": 98,
"alpha_frac": 0.5705738976,
"autogenerated": false,
"ratio": 4.15561694290976,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.522619084050976,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import platform
import sys
from threading import Thread, Lock
import json
import warnings
import time
import stripe
import pytest
if platform.python_implementation() == "PyPy":
pytest.skip("skip integration tests with PyPy", allow_module_level=True)
if sys.version_info[0] < 3:
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
else:
from http.server import BaseHTTPRequestHandler, HTTPServer
class TestIntegration(object):
@pytest.fixture(autouse=True)
def close_mock_server(self):
yield
if self.mock_server:
self.mock_server.shutdown()
self.mock_server.server_close()
self.mock_server_thread.join()
@pytest.fixture(autouse=True)
def setup_stripe(self):
orig_attrs = {
"api_base": stripe.api_base,
"api_key": stripe.api_key,
"default_http_client": stripe.default_http_client,
"enable_telemetry": stripe.enable_telemetry,
"max_network_retries": stripe.max_network_retries,
"proxy": stripe.proxy,
}
stripe.api_base = "http://localhost:12111" # stripe-mock
stripe.api_key = "sk_test_123"
stripe.default_http_client = None
stripe.enable_telemetry = False
stripe.max_network_retries = 3
stripe.proxy = None
yield
stripe.api_base = orig_attrs["api_base"]
stripe.api_key = orig_attrs["api_key"]
stripe.default_http_client = orig_attrs["default_http_client"]
stripe.enable_telemetry = orig_attrs["enable_telemetry"]
stripe.max_network_retries = orig_attrs["max_network_retries"]
stripe.proxy = orig_attrs["proxy"]
def setup_mock_server(self, handler):
# Configure mock server.
# Passing 0 as the port will cause a random free port to be chosen.
self.mock_server = HTTPServer(("localhost", 0), handler)
_, self.mock_server_port = self.mock_server.server_address
# Start running mock server in a separate thread.
# Daemon threads automatically shut down when the main process exits.
self.mock_server_thread = Thread(target=self.mock_server.serve_forever)
self.mock_server_thread.setDaemon(True)
self.mock_server_thread.start()
def test_hits_api_base(self):
class MockServerRequestHandler(BaseHTTPRequestHandler):
num_requests = 0
def do_GET(self):
self.__class__.num_requests += 1
self.send_response(200)
self.send_header(
"Content-Type", "application/json; charset=utf-8"
)
self.end_headers()
self.wfile.write(json.dumps({}).encode("utf-8"))
return
self.setup_mock_server(MockServerRequestHandler)
stripe.api_base = "http://localhost:%s" % self.mock_server_port
stripe.Balance.retrieve()
assert MockServerRequestHandler.num_requests == 1
def test_hits_proxy_through_default_http_client(self):
class MockServerRequestHandler(BaseHTTPRequestHandler):
num_requests = 0
def do_GET(self):
self.__class__.num_requests += 1
self.send_response(200)
self.send_header(
"Content-Type", "application/json; charset=utf-8"
)
self.end_headers()
self.wfile.write(json.dumps({}).encode("utf-8"))
return
self.setup_mock_server(MockServerRequestHandler)
stripe.proxy = "http://localhost:%s" % self.mock_server_port
stripe.Balance.retrieve()
assert MockServerRequestHandler.num_requests == 1
stripe.proxy = "http://bad-url"
with warnings.catch_warnings(record=True) as w:
stripe.Balance.retrieve()
assert len(w) == 1
assert "stripe.proxy was updated after sending a request" in str(
w[0].message
)
assert MockServerRequestHandler.num_requests == 2
def test_hits_proxy_through_custom_client(self):
class MockServerRequestHandler(BaseHTTPRequestHandler):
num_requests = 0
def do_GET(self):
self.__class__.num_requests += 1
self.send_response(200)
self.send_header(
"Content-Type", "application/json; charset=utf-8"
)
self.end_headers()
self.wfile.write(json.dumps({}).encode("utf-8"))
return
self.setup_mock_server(MockServerRequestHandler)
stripe.default_http_client = (
stripe.http_client.new_default_http_client(
proxy="http://localhost:%s" % self.mock_server_port
)
)
stripe.Balance.retrieve()
assert MockServerRequestHandler.num_requests == 1
def test_passes_client_telemetry_when_enabled(self):
class MockServerRequestHandler(BaseHTTPRequestHandler):
num_requests = 0
def do_GET(self):
try:
self.__class__.num_requests += 1
req_num = self.__class__.num_requests
if req_num == 1:
time.sleep(31 / 1000) # 31 ms
assert not self.headers.get(
"X-Stripe-Client-Telemetry"
)
elif req_num == 2:
assert self.headers.get("X-Stripe-Client-Telemetry")
telemetry = json.loads(
self.headers.get("x-stripe-client-telemetry")
)
assert "last_request_metrics" in telemetry
req_id = telemetry["last_request_metrics"][
"request_id"
]
duration_ms = telemetry["last_request_metrics"][
"request_duration_ms"
]
assert req_id == "req_1"
# The first request took 31 ms, so the client perceived
# latency shouldn't be outside this range.
assert 30 < duration_ms < 300
else:
assert False, (
"Should not have reached request %d" % req_num
)
self.send_response(200)
self.send_header(
"Content-Type", "application/json; charset=utf-8"
)
self.send_header("Request-Id", "req_%d" % req_num)
self.end_headers()
self.wfile.write(json.dumps({}).encode("utf-8"))
except AssertionError as ex:
# Throwing assertions on the server side causes a
# connection error to be logged instead of an assertion
# failure. Instead, we return the assertion failure as
# json so it can be logged as a StripeError.
self.send_response(400)
self.send_header(
"Content-Type", "application/json; charset=utf-8"
)
self.end_headers()
self.wfile.write(
json.dumps(
{
"error": {
"type": "invalid_request_error",
"message": str(ex),
}
}
).encode("utf-8")
)
self.setup_mock_server(MockServerRequestHandler)
stripe.api_base = "http://localhost:%s" % self.mock_server_port
stripe.enable_telemetry = True
stripe.Balance.retrieve()
stripe.Balance.retrieve()
assert MockServerRequestHandler.num_requests == 2
def test_uses_thread_local_client_telemetry(self):
class MockServerRequestHandler(BaseHTTPRequestHandler):
num_requests = 0
seen_metrics = set()
stats_lock = Lock()
def do_GET(self):
with self.__class__.stats_lock:
self.__class__.num_requests += 1
req_num = self.__class__.num_requests
if self.headers.get("X-Stripe-Client-Telemetry"):
telemetry = json.loads(
self.headers.get("X-Stripe-Client-Telemetry")
)
req_id = telemetry["last_request_metrics"]["request_id"]
with self.__class__.stats_lock:
self.__class__.seen_metrics.add(req_id)
self.send_response(200)
self.send_header(
"Content-Type", "application/json; charset=utf-8"
)
self.send_header("Request-Id", "req_%d" % req_num)
self.end_headers()
self.wfile.write(json.dumps({}).encode("utf-8"))
self.setup_mock_server(MockServerRequestHandler)
stripe.api_base = "http://localhost:%s" % self.mock_server_port
stripe.enable_telemetry = True
stripe.default_http_client = stripe.http_client.RequestsClient()
def work():
stripe.Balance.retrieve()
stripe.Balance.retrieve()
threads = [Thread(target=work) for _ in range(10)]
for t in threads:
t.start()
for t in threads:
t.join()
assert MockServerRequestHandler.num_requests == 20
assert len(MockServerRequestHandler.seen_metrics) == 10
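# Hedged standalone sketch (not part of the stripe-python test suite): the
# ephemeral-port pattern used by setup_mock_server above, shown in isolation.
# Binding to port 0 asks the OS for any free port; server_address then reports
# which port was actually chosen. The handler and function names here are
# hypothetical; the BaseHTTPRequestHandler/HTTPServer/Thread imports from the
# top of this file are reused.
def _ephemeral_port_server_demo():
    class _OkHandler(BaseHTTPRequestHandler):
        def do_GET(self):
            self.send_response(200)
            self.end_headers()

    server = HTTPServer(("localhost", 0), _OkHandler)  # port 0 -> OS picks a free port
    _, port = server.server_address                    # the port that was chosen
    thread = Thread(target=server.serve_forever)
    thread.daemon = True                               # shuts down with the main process
    thread.start()
    return server, port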
| {
"repo_name": "stripe/stripe-python",
"path": "tests/test_integration.py",
"copies": "1",
"size": "9930",
"license": "mit",
"hash": -2136499152687172400,
"line_mean": 37.488372093,
"line_max": 79,
"alpha_frac": 0.529204431,
"autogenerated": false,
"ratio": 4.495246717971933,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5524451148971934,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import platform
import sys
import types
import warnings
PY2 = sys.version_info[0] == 2
PYPY = platform.python_implementation() == "PyPy"
HAS_F_STRINGS = (
sys.version_info[:2] >= (3, 7)
if not PYPY
else sys.version_info[:2] >= (3, 6)
)
PY310 = sys.version_info[:2] >= (3, 10)
if PYPY or sys.version_info[:2] >= (3, 6):
ordered_dict = dict
else:
from collections import OrderedDict
ordered_dict = OrderedDict
if PY2:
from collections import Mapping, Sequence
from UserDict import IterableUserDict
# We 'bundle' isclass instead of using inspect as importing inspect is
# fairly expensive (order of 10-15 ms for a modern machine in 2016)
def isclass(klass):
return isinstance(klass, (type, types.ClassType))
def new_class(name, bases, kwds, exec_body):
"""
A minimal stub of types.new_class that we need for make_class.
"""
ns = {}
exec_body(ns)
return type(name, bases, ns)
# TYPE is used in exceptions, repr(int) is different on Python 2 and 3.
TYPE = "type"
def iteritems(d):
return d.iteritems()
# Python 2 is bereft of a read-only dict proxy, so we make one!
class ReadOnlyDict(IterableUserDict):
"""
Best-effort read-only dict wrapper.
"""
def __setitem__(self, key, val):
# We gently pretend we're a Python 3 mappingproxy.
raise TypeError(
"'mappingproxy' object does not support item assignment"
)
def update(self, _):
# We gently pretend we're a Python 3 mappingproxy.
raise AttributeError(
"'mappingproxy' object has no attribute 'update'"
)
def __delitem__(self, _):
# We gently pretend we're a Python 3 mappingproxy.
raise TypeError(
"'mappingproxy' object does not support item deletion"
)
def clear(self):
# We gently pretend we're a Python 3 mappingproxy.
raise AttributeError(
"'mappingproxy' object has no attribute 'clear'"
)
def pop(self, key, default=None):
# We gently pretend we're a Python 3 mappingproxy.
raise AttributeError(
"'mappingproxy' object has no attribute 'pop'"
)
def popitem(self):
# We gently pretend we're a Python 3 mappingproxy.
raise AttributeError(
"'mappingproxy' object has no attribute 'popitem'"
)
def setdefault(self, key, default=None):
# We gently pretend we're a Python 3 mappingproxy.
raise AttributeError(
"'mappingproxy' object has no attribute 'setdefault'"
)
def __repr__(self):
# Override to be identical to the Python 3 version.
return "mappingproxy(" + repr(self.data) + ")"
def metadata_proxy(d):
res = ReadOnlyDict()
res.data.update(d) # We blocked update, so we have to do it like this.
return res
def just_warn(*args, **kw): # pragma: no cover
"""
We only warn on Python 3 because we are not aware of any concrete
consequences of not setting the cell on Python 2.
"""
else: # Python 3 and later.
from collections.abc import Mapping, Sequence # noqa
def just_warn(*args, **kw):
"""
We only warn on Python 3 because we are not aware of any concrete
consequences of not setting the cell on Python 2.
"""
warnings.warn(
"Running interpreter doesn't sufficiently support code object "
"introspection. Some features like bare super() or accessing "
"__class__ will not work with slotted classes.",
RuntimeWarning,
stacklevel=2,
)
def isclass(klass):
return isinstance(klass, type)
TYPE = "class"
def iteritems(d):
return d.items()
new_class = types.new_class
def metadata_proxy(d):
return types.MappingProxyType(dict(d))
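# Hedged usage sketch (not part of attrs): on both major Python versions the
# metadata_proxy() defined above returns a read-only mapping over a copy of
# its input, mimicking types.MappingProxyType. The helper name is hypothetical.
def _metadata_proxy_demo():
    md = metadata_proxy({"key": "value"})
    assert md["key"] == "value"     # reads work as usual
    try:
        md["key"] = "other"         # writes are rejected on both implementations
    except TypeError:
        pass
    return md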
def make_set_closure_cell():
"""Return a function of two arguments (cell, value) which sets
the value stored in the closure cell `cell` to `value`.
"""
# pypy makes this easy. (It also supports the logic below, but
# why not do the easy/fast thing?)
if PYPY:
def set_closure_cell(cell, value):
cell.__setstate__((value,))
return set_closure_cell
# Otherwise gotta do it the hard way.
# Create a function that will set its first cellvar to `value`.
def set_first_cellvar_to(value):
x = value
return
# This function will be eliminated as dead code, but
# not before its reference to `x` forces `x` to be
# represented as a closure cell rather than a local.
def force_x_to_be_a_cell(): # pragma: no cover
return x
try:
# Extract the code object and make sure our assumptions about
# the closure behavior are correct.
if PY2:
co = set_first_cellvar_to.func_code
else:
co = set_first_cellvar_to.__code__
if co.co_cellvars != ("x",) or co.co_freevars != ():
raise AssertionError # pragma: no cover
# Convert this code object to a code object that sets the
# function's first _freevar_ (not cellvar) to the argument.
if sys.version_info >= (3, 8):
# CPython 3.8+ has an incompatible CodeType signature
# (added a posonlyargcount argument) but also added
# CodeType.replace() to do this without counting parameters.
set_first_freevar_code = co.replace(
co_cellvars=co.co_freevars, co_freevars=co.co_cellvars
)
else:
args = [co.co_argcount]
if not PY2:
args.append(co.co_kwonlyargcount)
args.extend(
[
co.co_nlocals,
co.co_stacksize,
co.co_flags,
co.co_code,
co.co_consts,
co.co_names,
co.co_varnames,
co.co_filename,
co.co_name,
co.co_firstlineno,
co.co_lnotab,
# These two arguments are reversed:
co.co_cellvars,
co.co_freevars,
]
)
set_first_freevar_code = types.CodeType(*args)
def set_closure_cell(cell, value):
# Create a function using the set_first_freevar_code,
# whose first closure cell is `cell`. Calling it will
# change the value of that cell.
setter = types.FunctionType(
set_first_freevar_code, {}, "setter", (), (cell,)
)
# And call it to set the cell.
setter(value)
# Make sure it works on this interpreter:
def make_func_with_cell():
x = None
def func():
return x # pragma: no cover
return func
if PY2:
cell = make_func_with_cell().func_closure[0]
else:
cell = make_func_with_cell().__closure__[0]
set_closure_cell(cell, 100)
if cell.cell_contents != 100:
raise AssertionError # pragma: no cover
except Exception:
return just_warn
else:
return set_closure_cell
set_closure_cell = make_set_closure_cell()
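# Hedged usage sketch (not part of attrs): set_closure_cell rewrites the value
# stored in an existing closure cell, which is how attrs patches the __class__
# cell on slotted classes. Assumes a Python 3 interpreter for the __closure__
# attribute (Python 2 would use func_closure); the names are hypothetical.
def _set_closure_cell_demo():
    def outer():
        x = "before"

        def inner():
            return x

        return inner

    fn = outer()
    cell = fn.__closure__[0]          # the cell that holds `x`
    set_closure_cell(cell, "after")   # falls back to just_warn on unsupported interpreters
    return fn()                       # "after" where the rewrite is supported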
| {
"repo_name": "python-attrs/attrs",
"path": "src/attr/_compat.py",
"copies": "2",
"size": "7713",
"license": "mit",
"hash": 8983622902902806000,
"line_mean": 30.1008064516,
"line_max": 79,
"alpha_frac": 0.5582782316,
"autogenerated": false,
"ratio": 4.26603982300885,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.582431805460885,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import pymongo
from contextlib import contextmanager
import datashape
from into import discover, convert, append, resource, dshape
from into.backends.mongo import *
from toolz import pluck
from copy import deepcopy
from bson.objectid import ObjectId
conn = pymongo.MongoClient()
db = conn._test_db
@contextmanager
def coll(data):
c = db.my_collection
if data:
c.insert(deepcopy(data))
try:
yield c
finally:
c.drop()
bank = ({'name': 'Alice', 'amount': 100},
{'name': 'Alice', 'amount': 200},
{'name': 'Bob', 'amount': 100},
{'name': 'Bob', 'amount': 200},
{'name': 'Bob', 'amount': 300})
ds = dshape('var * {name: string, amount: int}')
def test_discover():
with coll(bank) as c:
assert discover(bank) == discover(c)
def test_append_convert():
with coll([]) as c:
append(c, bank, dshape=ds)
assert convert(list, c, dshape=ds) == list(pluck(['name', 'amount'], bank))
def test_resource():
coll = resource('mongodb://localhost:27017/db::mycoll')
assert coll.name == 'mycoll'
assert coll.database.name == 'db'
assert coll.database.connection.host == 'localhost'
assert coll.database.connection.port == 27017
def test_multiple_object_ids():
data = [{'x': 1, 'y': 2, 'other': ObjectId('1' * 24)},
{'x': 3, 'y': 4, 'other': ObjectId('2' * 24)}]
with coll(data) as c:
assert discover(c) == dshape('2 * {x: int64, y: int64}')
assert convert(list, c) == [(1, 2), (3, 4)]
| {
"repo_name": "mrocklin/into",
"path": "into/backends/tests/test_mongo.py",
"copies": "1",
"size": "1606",
"license": "bsd-3-clause",
"hash": 7301611724500734000,
"line_mean": 24.09375,
"line_max": 83,
"alpha_frac": 0.6052303861,
"autogenerated": false,
"ratio": 3.3598326359832638,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4465063022083264,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import pymongo
from pymongo.collection import Collection
from collections import Iterator
from datashape import discover, DataShape, Record, var
from datashape.predicates import isdimension
from datashape.dispatch import dispatch
from toolz import take, partition_all, concat, pluck
import copy
from bson.objectid import ObjectId
import re
from ..convert import convert, ooc_types
from ..append import append
from ..resource import resource
@discover.register(Collection)
def discover_pymongo_collection(coll, n=50):
items = list(take(n, coll.find()))
if not items:
return var * Record([])
oid_cols = [k for k, v in items[0].items() if isinstance(v, ObjectId)]
for item in items:
for col in oid_cols:
del item[col]
ds = discover(items)
if isdimension(ds[0]):
return coll.count() * ds.subshape[0]
else:
raise ValueError("Consistent datashape not found")
def _into_iter_mongodb(coll, columns=None, dshape=None):
""" Into helper function
Return a list of column names and a lazy sequence of tuples
"""
seq = coll.find()
if not columns and dshape:
columns = dshape.measure.names
elif not columns:
item = next(seq)
seq = concat([[item], seq])
columns = sorted(item.keys())
columns.remove('_id')
return columns, pluck(columns, seq)
@convert.register(Iterator, Collection, cost=500.0)
def collection_to_iterator(coll, columns=None, dshape=None, **kwargs):
columns, seq = _into_iter_mongodb(coll, columns=columns, dshape=dshape)
return seq
@append.register(Collection, Iterator)
def append_iterator_to_pymongo(coll, seq, columns=None, dshape=None, chunksize=1024, **kwargs):
seq = iter(seq)
item = next(seq)
seq = concat([[item], seq])
if isinstance(item, (tuple, list)):
if not columns and dshape:
columns = dshape.measure.names
if not columns:
raise ValueError("Inputs must be dictionaries. "
"Or provide columns=[...] or dshape=DataShape(...) keyword")
seq = (dict(zip(columns, item)) for item in seq)
for block in partition_all(chunksize, seq):
coll.insert(copy.deepcopy(block))
return coll
@append.register(Collection, object)
def append_anything_to_collection(coll, o, **kwargs):
return append(coll, convert(Iterator, o, **kwargs), **kwargs)
@resource.register(r'mongodb://\w*:\w*@\w*.*', priority=11)
def resource_mongo_with_authentication(uri, collection_name=None, **kwargs):
pattern = r'mongodb://(?P<user>\w*):(?P<pass>\w*)@(?P<hostport>.*:?\d*)/(?P<database>\w+)'
d = re.search(pattern, uri).groupdict()
return _resource_mongo(d, collection_name)
@resource.register(r'mongodb://.+')
def resource_mongo(uri, collection_name=None, **kwargs):
pattern = r'mongodb://(?P<hostport>.*:?\d*)/(?P<database>\w+)'
d = re.search(pattern, uri).groupdict()
return _resource_mongo(d, collection_name)
def _resource_mongo(d, collection_name=None):
client = pymongo.MongoClient(d['hostport'])
db = getattr(client, d['database'])
if d.get('user'):
db.authenticate(d['user'], d['pass'])
if collection_name is None:
return db
return getattr(db, collection_name)
@discover.register(pymongo.database.Database)
def discover_mongo_database(db):
names = db.collection_names()
return DataShape(Record(zip(names, (discover(getattr(db, name))
for name in names))))
ooc_types.add(Collection)
@dispatch(Collection)
def drop(m):
m.drop()
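# Hedged usage sketch (not part of odo): how the pieces above fit together.
# The URI, database and collection names are hypothetical and a MongoDB server
# on localhost is assumed; the 'db::collection' form mirrors the resource()
# usage shown in the into/odo mongo tests.
def _mongo_backend_demo():
    coll = resource('mongodb://localhost:27017/testdb::people')
    # Dicts need no column or dshape hints; they are inserted in blocks by
    # append_iterator_to_pymongo.
    append(coll, [{'name': 'Alice', 'amount': 100}])
    # collection_to_iterator yields tuples; _into_iter_mongodb derives the
    # column names from the first document and strips the '_id' field.
    return list(convert(Iterator, coll))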
| {
"repo_name": "cpcloud/odo",
"path": "odo/backends/mongo.py",
"copies": "9",
"size": "3665",
"license": "bsd-3-clause",
"hash": 9214784505460507000,
"line_mean": 29.7983193277,
"line_max": 95,
"alpha_frac": 0.6622100955,
"autogenerated": false,
"ratio": 3.593137254901961,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8755347350401961,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import pymongo
from pymongo.collection import Collection
from collections import Iterator
from datashape import discover
from datashape.predicates import isdimension
from datashape.dispatch import dispatch
from toolz import take, partition_all, concat, pluck
import copy
from bson.objectid import ObjectId
import re
from ..convert import convert, ooc_types
from ..append import append
from ..resource import resource
@discover.register(Collection)
def discover_pymongo_collection(coll, n=50):
items = list(take(n, coll.find()))
oid_cols = [k for k, v in items[0].items() if isinstance(v, ObjectId)]
for item in items:
for col in oid_cols:
del item[col]
ds = discover(items)
if isdimension(ds[0]):
return coll.count() * ds.subshape[0]
else:
raise ValueError("Consistent datashape not found")
def _into_iter_mongodb(coll, columns=None, dshape=None):
""" Into helper function
Return a list of column names and a lazy sequence of tuples
"""
seq = coll.find()
if not columns and dshape:
columns = dshape.measure.names
elif not columns:
item = next(seq)
seq = concat([[item], seq])
columns = sorted(item.keys())
columns.remove('_id')
return columns, pluck(columns, seq)
@convert.register(Iterator, Collection, cost=500.0)
def collection_to_iterator(coll, columns=None, dshape=None, **kwargs):
columns, seq = _into_iter_mongodb(coll, columns=columns, dshape=dshape)
return seq
@append.register(Collection, Iterator)
def append_iterator_to_pymongo(coll, seq, columns=None, dshape=None, chunksize=1024, **kwargs):
seq = iter(seq)
item = next(seq)
seq = concat([[item], seq])
if isinstance(item, (tuple, list)):
if not columns and dshape:
columns = dshape.measure.names
if not columns:
raise ValueError("Inputs must be dictionaries. "
"Or provide columns=[...] or dshape=DataShape(...) keyword")
seq = (dict(zip(columns, item)) for item in seq)
for block in partition_all(chunksize, seq):
coll.insert(copy.deepcopy(block))
return coll
@append.register(Collection, object)
def append_anything_to_collection(coll, o, **kwargs):
return append(coll, convert(Iterator, o, **kwargs), **kwargs)
@resource.register('mongodb://\w*:\w*@\w*.*', priority=11)
def resource_mongo_with_authentication(uri, collection_name, **kwargs):
pattern = 'mongodb://(?P<user>\w*):(?P<pass>\w*)@(?P<hostport>.*:?\d*)/(?P<database>\w+)'
d = re.search(pattern, uri).groupdict()
return _resource_mongo(d, collection_name)
@resource.register('mongodb://.+')
def resource_mongo(uri, collection_name, **kwargs):
pattern = 'mongodb://(?P<hostport>.*:?\d*)/(?P<database>\w+)'
d = re.search(pattern, uri).groupdict()
return _resource_mongo(d, collection_name)
def _resource_mongo(d, collection_name):
client = pymongo.MongoClient(d['hostport'])
db = getattr(client, d['database'])
if d.get('user'):
db.authenticate(d['user'], d['pass'])
coll = getattr(db, collection_name)
return coll
ooc_types.add(Collection)
@dispatch(Collection)
def drop(m):
m.drop()
| {
"repo_name": "mrocklin/into",
"path": "into/backends/mongo.py",
"copies": "1",
"size": "3293",
"license": "bsd-3-clause",
"hash": 3599918313526012000,
"line_mean": 29.2110091743,
"line_max": 95,
"alpha_frac": 0.6665654418,
"autogenerated": false,
"ratio": 3.56,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47265654417999997,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import pytest
from drms.utils import (
_pd_to_datetime_coerce, _pd_to_numeric_coerce,
_split_arg, _extract_series_name)
# test_pd_to_datetime
# test_pd_to_numeric
@pytest.mark.parametrize('in_obj, expected', [
('', []),
('asd', ['asd']),
('aa,bb,cc', ['aa', 'bb', 'cc']),
('aa, bb, cc', ['aa', 'bb', 'cc']),
(' aa,bb, cc, dd', ['aa', 'bb', 'cc', 'dd']),
('aa,\tbb,cc, dd ', ['aa', 'bb', 'cc', 'dd']),
(u'aa,\tbb,cc, dd ', [u'aa', u'bb', u'cc', u'dd']),
([], []),
(['a', 'b', 'c'], ['a', 'b', 'c']),
(('a', 'b', 'c'), ['a', 'b', 'c']),
])
def test_split_arg(in_obj, expected):
res = _split_arg(in_obj)
assert len(res) == len(expected)
for i in range(len(res)):
assert res[i] == expected[i]
@pytest.mark.parametrize('ds_string, expected', [
('hmi.v_45s', 'hmi.v_45s'),
('hmi.v_45s[2010.05.01_TAI]', 'hmi.v_45s'),
('hmi.v_45s[2010.05.01_TAI/365d@1d]', 'hmi.v_45s'),
('hmi.v_45s[2010.05.01_TAI/365d@1d][?QUALITY>=0?]', 'hmi.v_45s'),
('hmi.v_45s[2010.05.01_TAI/1d@6h]{Dopplergram}', 'hmi.v_45s'),
])
def test_extract_series(ds_string, expected):
assert _extract_series_name(ds_string) == expected
| {
"repo_name": "kbg/drms",
"path": "drms/tests/test_utils.py",
"copies": "1",
"size": "1265",
"license": "mit",
"hash": -4438669692929368000,
"line_mean": 30.625,
"line_max": 69,
"alpha_frac": 0.5280632411,
"autogenerated": false,
"ratio": 2.4095238095238094,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8437587050623809,
"avg_score": 0,
"num_lines": 40
} |
from __future__ import absolute_import, division, print_function
import pytest
from mock import MagicMock
from glue.core import ComponentID
from glue.config import link_function, link_helper
from ..link_equation import (function_label, helper_label,
LinkEquation, ArgumentWidget)
@link_function('testing function', ['y'])
def func1(x):
return x
@link_function('testing function', ['a', 'b'])
def func2(x, z):
return x + z, x - z
@link_helper('test helper', ['a', 'b'])
def helper(x, y):
return [x, x, y]
def test_function_label():
f1 = [f for f in link_function if f[0] is func1][0]
f2 = [f for f in link_function if f[0] is func2][0]
assert function_label(f1) == "Link from x to y"
assert function_label(f2) == "Link from x, z to a, b"
def test_helper_label():
f1 = [h for h in link_helper if h[0] is helper][0]
assert helper_label(f1) == 'test helper'
class TestArgumentWidget(object):
def test_label(self):
a = ArgumentWidget('test')
assert a.label == 'test'
def test_set_label(self):
a = ArgumentWidget('test')
a.label = 'b'
assert a.label == 'b'
def test_drop(self):
target_id = ComponentID('test')
event = MagicMock()
event.mimeData().data.return_value = target_id
a = ArgumentWidget('test')
a.dropEvent(event)
assert a.component_id is target_id
assert a.editor_text == 'test'
def test_drop_invalid(self):
event = MagicMock()
event.mimeData().data.return_value = 5
a = ArgumentWidget('')
a.dropEvent(event)
assert a.component_id is None
def test_clear(self):
target_id = ComponentID('test')
event = MagicMock()
event.mimeData().data.return_value = target_id
a = ArgumentWidget('test')
a.dropEvent(event)
assert a.component_id is target_id
a.clear()
assert a.component_id is None
assert a.editor_text == ''
def test_drag_enter_accept(self):
event = MagicMock()
event.mimeData().hasFormat.return_value = True
a = ArgumentWidget('x')
a.dragEnterEvent(event)
event.accept.assert_called_once_with()
def test_drag_enter_ignore(self):
event = MagicMock()
event.mimeData().hasFormat.return_value = False
a = ArgumentWidget('x')
a.dragEnterEvent(event)
event.ignore.assert_called_once_with()
class TestLinkEquation(object):
def setup_method(self, method):
self.widget = LinkEquation()
def test_select_function_member(self):
member = link_function.members[1]
assert self.widget.function is not member
self.widget.function = member
assert self.widget.function is member
def test_select_function_helper(self):
member = link_helper.members[-1]
self.widget.function = member
assert self.widget.function is member
def test_select_invalid_function(self):
with pytest.raises(ValueError) as exc:
def bad(x):
pass
self.widget.function = (bad, None, None)
assert exc.value.args[0].startswith('Cannot find data')
def test_make_link_function(self):
widget = LinkEquation()
f1 = [f for f in link_function if f[0] is func1][0]
widget.function = f1
x, y = ComponentID('x'), ComponentID('y')
widget.signature = [x], y
links = widget.links()
assert len(links) == 1
assert links[0].get_from_ids() == [x]
assert links[0].get_to_id() == y
assert links[0].get_using() is func1
def test_make_link_helper(self):
widget = LinkEquation()
f1 = [f for f in link_helper if f[0] is helper][0]
widget.function = f1
x, y = ComponentID('x'), ComponentID('y')
widget.signature = [x, y], None
links = widget.links()
assert links == helper(x, y)
def test_links_empty(self):
assert LinkEquation().links() == []
def test_links_empty_helper(self):
widget = LinkEquation()
f1 = [f for f in link_helper if f[0] is helper][0]
widget.function = f1
assert widget.is_helper()
assert widget.links() == []
def test_clear_inputs(self):
widget = LinkEquation()
f1 = [f for f in link_helper if f[0] is helper][0]
widget.function = f1
x, y = ComponentID('x'), ComponentID('y')
widget.signature = [x, y], None
assert widget.signature == ([x, y], None)
widget.clear_inputs()
assert widget.signature == ([None, None], None)
def test_signal_connections(self):
# testing that signal-slot connections don't crash
widget = LinkEquation()
signal = widget._ui.function.currentIndexChanged
signal.emit(5)
signal = widget._output_widget.editor.textChanged
signal.emit('changing')
| {
"repo_name": "stscieisenhamer/glue",
"path": "glue/dialogs/link_editor/qt/tests/test_link_equation.py",
"copies": "4",
"size": "4988",
"license": "bsd-3-clause",
"hash": -926783851932668300,
"line_mean": 29.2303030303,
"line_max": 64,
"alpha_frac": 0.598436247,
"autogenerated": false,
"ratio": 3.656891495601173,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6255327742601173,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import pytest
from mock import MagicMock
from glue.core import Data, DataCollection
from glue.core.component_id import ComponentID
from glue.external.echo.selection import SelectionCallbackProperty
from glue.core.state_objects import State
from ..data_combo_helper import (ComponentIDComboHelper, ManualDataComboHelper,
DataCollectionComboHelper)
def selection_choices(state, property):
items = getattr(type(state), property).get_choice_labels(state)
return ":".join(items).replace('Coordinate components', 'coord').replace('Main components', 'main').replace('Derived components', 'derived')
class ExampleState(State):
combo = SelectionCallbackProperty()
def test_component_id_combo_helper():
state = ExampleState()
dc = DataCollection([])
helper = ComponentIDComboHelper(state, 'combo', dc)
assert selection_choices(state, 'combo') == ""
data1 = Data(x=[1, 2, 3], y=[2, 3, 4], label='data1')
dc.append(data1)
helper.append_data(data1)
assert selection_choices(state, 'combo') == "x:y"
data2 = Data(a=[1, 2, 3], b=['a', 'b', 'c'], label='data2')
dc.append(data2)
helper.append_data(data2)
assert selection_choices(state, 'combo') == "data1:x:y:data2:a:b"
helper.categorical = False
assert selection_choices(state, 'combo') == "data1:x:y:data2:a"
helper.numeric = False
assert selection_choices(state, 'combo') == "data1:data2"
helper.categorical = True
helper.numeric = True
helper.pixel_coord = True
assert selection_choices(state, 'combo') == "data1:main:x:y:coord:Pixel Axis 0 [x]:data2:main:a:b:coord:Pixel Axis 0 [x]"
helper.world_coord = True
assert selection_choices(state, 'combo') == "data1:main:x:y:coord:Pixel Axis 0 [x]:World 0:data2:main:a:b:coord:Pixel Axis 0 [x]:World 0"
helper.pixel_coord = False
assert selection_choices(state, 'combo') == "data1:main:x:y:coord:World 0:data2:main:a:b:coord:World 0"
helper.world_coord = False
dc.remove(data2)
assert selection_choices(state, 'combo') == "x:y"
data1['z'] = data1.id['x'] + 1
assert selection_choices(state, 'combo') == "main:x:y:derived:z"
helper.derived = False
assert selection_choices(state, 'combo') == "x:y"
data1.id['x'].label = 'z'
assert selection_choices(state, 'combo') == "z:y"
helper.remove_data(data1)
assert selection_choices(state, 'combo') == ""
def test_component_id_combo_helper_nocollection():
# Make sure that we can use use ComponentIDComboHelper without any
# data collection.
state = ExampleState()
data = Data(x=[1, 2, 3], y=[2, 3, 4], z=['a', 'b', 'c'], label='data1')
helper = ComponentIDComboHelper(state, 'combo', data=data)
assert selection_choices(state, 'combo') == "x:y:z"
helper.categorical = False
assert selection_choices(state, 'combo') == "x:y"
helper.numeric = False
assert selection_choices(state, 'combo') == ""
helper.categorical = True
assert selection_choices(state, 'combo') == "z"
helper.numeric = True
assert selection_choices(state, 'combo') == "x:y:z"
data2 = Data(a=[1, 2, 3], b=['a', 'b', 'c'], label='data2')
with pytest.raises(Exception) as exc:
helper.append_data(data2)
assert exc.value.args[0] == ("Cannot change data in ComponentIDComboHelper "
"initialized from a single dataset")
with pytest.raises(Exception) as exc:
helper.remove_data(data2)
assert exc.value.args[0] == ("Cannot change data in ComponentIDComboHelper "
"initialized from a single dataset")
with pytest.raises(Exception) as exc:
helper.set_multiple_data([data2])
assert exc.value.args[0] == ("Cannot change data in ComponentIDComboHelper "
"initialized from a single dataset")
def test_component_id_combo_helper_init():
# Regression test to make sure that the numeric and categorical options
# in the __init__ are taken into account properly
state = ExampleState()
dc = DataCollection([])
data = Data(a=[1, 2, 3], b=['a', 'b', 'c'], label='data2')
dc.append(data)
helper = ComponentIDComboHelper(state, 'combo', dc)
helper.append_data(data)
assert selection_choices(state, 'combo') == "a:b"
helper = ComponentIDComboHelper(state, 'combo', dc, numeric=False)
helper.append_data(data)
assert selection_choices(state, 'combo') == "b"
helper = ComponentIDComboHelper(state, 'combo', dc, categorical=False)
helper.append_data(data)
assert selection_choices(state, 'combo') == "a"
helper = ComponentIDComboHelper(state, 'combo', dc, numeric=False, categorical=False)
helper.append_data(data)
assert selection_choices(state, 'combo') == ""
def test_component_id_combo_helper_replaced():
# Make sure that when components are replaced, the equivalent combo index
# remains selected and an event is broadcast so that any attached callback
# properties can be sure to pull the latest text/userData.
callback = MagicMock()
state = ExampleState()
state.add_callback('combo', callback)
dc = DataCollection([])
helper = ComponentIDComboHelper(state, 'combo', dc)
assert selection_choices(state, 'combo') == ""
data1 = Data(x=[1, 2, 3], y=[2, 3, 4], label='data1')
callback.reset_mock()
dc.append(data1)
helper.append_data(data1)
callback.assert_called_once_with(0)
callback.reset_mock()
assert selection_choices(state, 'combo') == "x:y"
new_id = ComponentID(label='new')
data1.update_id(data1.id['x'], new_id)
callback.assert_called_once_with(0)
callback.reset_mock()
assert selection_choices(state, 'combo') == "new:y"
def test_component_id_combo_helper_add():
# Make sure that when adding a component, and if a data collection is not
# present, the choices still get updated
callback = MagicMock()
state = ExampleState()
state.add_callback('combo', callback)
dc = DataCollection([])
helper = ComponentIDComboHelper(state, 'combo')
assert selection_choices(state, 'combo') == ""
data1 = Data(x=[1, 2, 3], y=[2, 3, 4], label='data1')
callback.reset_mock()
dc.append(data1)
helper.append_data(data1)
callback.assert_called_once_with(0)
callback.reset_mock()
assert selection_choices(state, 'combo') == "x:y"
data1.add_component([7, 8, 9], 'z')
# Should get notification since choices have changed
callback.assert_called_once_with(0)
callback.reset_mock()
assert selection_choices(state, 'combo') == "x:y:z"
def test_manual_data_combo_helper():
state = ExampleState()
dc = DataCollection([])
helper = ManualDataComboHelper(state, 'combo', dc)
data1 = Data(x=[1, 2, 3], y=[2, 3, 4], label='data1')
dc.append(data1)
assert selection_choices(state, 'combo') == ""
helper.append_data(data1)
assert selection_choices(state, 'combo') == "data1"
data1.label = 'mydata1'
assert selection_choices(state, 'combo') == "mydata1"
dc.remove(data1)
assert selection_choices(state, 'combo') == ""
def test_data_collection_combo_helper():
state = ExampleState()
dc = DataCollection([])
helper = DataCollectionComboHelper(state, 'combo', dc) # noqa
data1 = Data(x=[1, 2, 3], y=[2, 3, 4], label='data1')
dc.append(data1)
assert selection_choices(state, 'combo') == "data1"
data1.label = 'mydata1'
assert selection_choices(state, 'combo') == "mydata1"
dc.remove(data1)
assert selection_choices(state, 'combo') == ""
def test_component_id_combo_helper_rename():
# Make sure that renaming component IDs now propagates to the combo options
state = ExampleState()
data = Data(x=[1, 2, 3], y=[2, 3, 4], label='data1')
dc = DataCollection([data])
helper = ComponentIDComboHelper(state, 'combo', dc) # noqa
helper.append_data(data)
assert selection_choices(state, 'combo') == "x:y"
data.id['x'].label = 'renamed'
assert selection_choices(state, 'combo') == "renamed:y"
def test_component_id_combo_helper_reorder():
# Make sure that renaming component IDs now propagates to the combo options
state = ExampleState()
data = Data(x=[1, 2, 3], y=[2, 3, 4], label='data1')
dc = DataCollection([data])
helper = ComponentIDComboHelper(state, 'combo', dc) # noqa
helper.append_data(data)
assert selection_choices(state, 'combo') == "x:y"
data.reorder_components(data.components[::-1])
assert selection_choices(state, 'combo') == "y:x"
| {
"repo_name": "stscieisenhamer/glue",
"path": "glue/core/tests/test_data_combo_helper.py",
"copies": "1",
"size": "8773",
"license": "bsd-3-clause",
"hash": 6184511889141139000,
"line_mean": 25.9110429448,
"line_max": 144,
"alpha_frac": 0.6499487063,
"autogenerated": false,
"ratio": 3.4966121960940613,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9645344909746988,
"avg_score": 0.0002431985294146627,
"num_lines": 326
} |
from __future__ import absolute_import, division, print_function
import pytest
from mock import patch, MagicMock
from ..main import (die_on_error, restore_session, load_data_files,
main, start_glue)
from ..core import Data, DataCollection, Hub
def test_die_on_error_exception():
"""Decorator should spawn a QMessageBox and exit"""
with pytest.raises(SystemExit):
with patch('glue.utils.qt.QMessageBoxPatched') as qmb:
@die_on_error('test_msg')
def test():
raise Exception()
test()
assert qmb.call_count == 1
def test_die_on_error_noexception():
"""Decorator should have no effect"""
@die_on_error('test_msg')
def test():
return 0
assert test() == 0
def test_load_data_files():
with patch('glue.core.data_factories.load_data') as ld:
ld.return_value = Data()
dc = load_data_files(['test.py'])
assert len(dc) == 1
def check_main(cmd, glue, config, data):
"""Pass command to main program, check for expected parsing"""
with patch('glue.main.start_glue') as sg:
main(cmd.split())
args, kwargs = sg.call_args
assert kwargs.get('datafiles', None) == data
assert kwargs.get('gluefile', None) == glue
assert kwargs.get('config', None) == config
def check_exec(cmd, pyfile):
"""Assert that main correctly dispatches to execute_script"""
with patch('glue.main.execute_script') as es:
main(cmd.split())
args, kwargs = es.call_args
assert args[0] == pyfile
def test_main_single_data():
check_main('glueqt test.fits', None, None, ['test.fits'])
def test_main_multi_data():
check_main('glueqt test.fits t2.csv', None, None, ['test.fits', 't2.csv'])
def test_main_config():
check_main('glueqt -c config.py', None, 'config.py', None)
def test_main_glu_arg():
check_main('glueqt -g test.glu', 'test.glu', None, None)
def test_main_auto_glu():
check_main('glueqt test.glu', 'test.glu', None, None)
def test_main_many_args():
check_main('glueqt -c config.py data.fits d2.csv', None,
'config.py', ['data.fits', 'd2.csv'])
def test_exec():
check_exec('glueqt -x test.py', 'test.py')
def test_auto_exec():
check_exec('glueqt test.py', 'test.py')
@pytest.mark.parametrize(('cmd'), ['glueqt -g test.glu test.fits',
'glueqt -g test.py test.fits',
'glueqt -x test.py -g test.glu',
'glueqt -x test.py -c test.py',
'glueqt -x',
'glueqt -g',
'glueqt -c'])
def test_invalid(cmd):
with pytest.raises(SystemExit):
main(cmd.split())
@pytest.mark.parametrize(('glue', 'config', 'data'),
[('test.glu', None, None),
(None, 'test.py', None),
(None, None, ['test.fits']),
(None, None, ['a.fits', 'b.fits']),
(None, 'test.py', ['a.fits'])])
def test_start(glue, config, data):
with patch('glue.main.restore_session') as rs:
with patch('glue.config.load_configuration') as lc:
with patch('glue.main.load_data_files') as ldf:
with patch('glue.qt.glue_application.GlueApplication') as ga:
with patch('glue.external.qt.QtGui') as qt:
rs.return_value = ga
ldf.return_value = Data()
start_glue(glue, config, data)
if glue:
rs.assert_called_once_with(glue)
if config:
lc.assert_called_once_with(search_path=[config])
if data:
ldf.assert_called_once_with(data)
| {
"repo_name": "JudoWill/glue",
"path": "glue/tests/test_main.py",
"copies": "1",
"size": "3967",
"license": "bsd-3-clause",
"hash": 5824634957052378000,
"line_mean": 31.2520325203,
"line_max": 78,
"alpha_frac": 0.5336526342,
"autogenerated": false,
"ratio": 3.6394495412844035,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9673102175484404,
"avg_score": 0,
"num_lines": 123
} |
from __future__ import absolute_import, division, print_function
import pytest
from odo.convert import (convert, list_to_numpy, iterator_to_numpy_chunks,
dataframe_to_chunks_dataframe, numpy_to_chunks_numpy,
chunks_dataframe_to_dataframe,
iterator_to_DataFrame_chunks)
from odo.chunks import chunks
from datashape import discover, dshape
from collections import Iterator
import datetime
import datashape
import numpy as np
import pandas as pd
import pandas.util.testing as tm
def test_basic():
assert convert(tuple, [1, 2, 3]) == (1, 2, 3)
def test_array_to_set():
assert convert(set, np.array([1, 2, 3])) == set([1, 2, 3])
def eq(a, b):
c = a == b
if isinstance(c, (np.ndarray, pd.Series)):
c = c.all()
return c
def test_Series_to_ndarray():
assert eq(convert(np.ndarray, pd.Series([1, 2, 3]), dshape='3 * float64'),
np.array([1.0, 2.0, 3.0]))
assert eq(convert(np.ndarray, pd.Series(['aa', 'bbb', 'ccccc']),
dshape='3 * string[5, "A"]'),
np.array(['aa', 'bbb', 'ccccc'], dtype='S5'))
assert eq(convert(np.ndarray, pd.Series(['aa', 'bbb', 'ccccc']),
dshape='3 * ?string'),
np.array(['aa', 'bbb', 'ccccc'], dtype='O'))
def test_Series_to_object_ndarray():
ds = datashape.dshape('{amount: float64, name: string, id: int64}')
expected = np.array([1.0, 'Alice', 3], dtype='object')
result = convert(np.ndarray, pd.Series(expected), dshape=ds)
np.testing.assert_array_equal(result, expected)
def test_Series_to_datetime64_ndarray():
s = pd.Series(pd.date_range(start='now', freq='N', periods=10).values)
expected = s.values
result = convert(np.ndarray, s.values)
np.testing.assert_array_equal(result, expected)
def test_set_to_Series():
assert eq(convert(pd.Series, set([1, 2, 3])),
pd.Series([1, 2, 3]))
def test_Series_to_set():
assert convert(set, pd.Series([1, 2, 3])) == set([1, 2, 3])
def test_dataframe_and_series():
s = pd.Series([1, 2, 3], name='foo')
df = convert(pd.DataFrame, s)
assert isinstance(df, pd.DataFrame)
assert list(df.columns) == ['foo']
s2 = convert(pd.Series, df)
assert isinstance(s2, pd.Series)
assert s2.name == 'foo'
def test_iterator_and_numpy_chunks():
c = iterator_to_numpy_chunks([1, 2, 3], chunksize=2)
assert isinstance(c, chunks(np.ndarray))
assert all(isinstance(chunk, np.ndarray) for chunk in c)
c = iterator_to_numpy_chunks([1, 2, 3], chunksize=2)
L = convert(list, c)
assert L == [1, 2, 3]
def test_list_to_numpy():
ds = datashape.dshape('3 * int32')
x = list_to_numpy([1, 2, 3], dshape=ds)
assert (x == [1, 2, 3]).all()
assert isinstance(x, np.ndarray)
ds = datashape.dshape('3 * ?int32')
x = list_to_numpy([1, None, 3], dshape=ds)
assert np.isnan(x[1])
def test_list_of_single_element_tuples_to_series():
data = [(1,), (2,), (3,)]
ds = datashape.dshape('3 * {id: int64}')
result = convert(pd.Series, data, dshape=ds)
expected = pd.Series([1, 2, 3], name='id')
tm.assert_series_equal(result, expected)
def test_cannot_convert_to_series_from_more_than_one_column():
data = [(1, 2), (2, 3), (3, 4)]
ds = datashape.dshape('3 * {id: int64, id2: int64}')
with pytest.raises(ValueError):
convert(pd.Series, data, dshape=ds)
def test_list_to_numpy_on_tuples():
data = [['a', 1], ['b', 2], ['c', 3]]
ds = datashape.dshape('var * (string[1], int32)')
x = list_to_numpy(data, dshape=ds)
assert convert(list, x) == [('a', 1), ('b', 2), ('c', 3)]
def test_list_to_numpy_on_dicts():
data = [{'name': 'Alice', 'amount': 100},
{'name': 'Bob', 'amount': 200}]
ds = datashape.dshape('var * {name: string[5], amount: int}')
x = list_to_numpy(data, dshape=ds)
assert convert(list, x) == [('Alice', 100), ('Bob', 200)]
def test_list_of_dicts_with_missing_to_numpy():
data = [{'name': 'Alice', 'amount': 100},
{'name': 'Bob'},
{'amount': 200}]
result = convert(np.ndarray, data)
assert result.dtype.names == ('amount', 'name')
expected = np.array([(100.0, 'Alice'),
(np.nan, 'Bob'),
(200.0, None)],
dtype=[('amount', 'float64'), ('name', 'O')])
assert np.all((result == expected) |
((result != result) & (expected != expected)))
def test_chunks_numpy_pandas():
x = np.array([('Alice', 100), ('Bob', 200)],
dtype=[('name', 'S7'), ('amount', 'i4')])
n = chunks(np.ndarray)([x, x])
pan = convert(chunks(pd.DataFrame), n)
num = convert(chunks(np.ndarray), pan)
assert isinstance(pan, chunks(pd.DataFrame))
assert all(isinstance(chunk, pd.DataFrame) for chunk in pan)
assert isinstance(num, chunks(np.ndarray))
assert all(isinstance(chunk, np.ndarray) for chunk in num)
def test_numpy_launders_python_types():
ds = datashape.dshape('3 * int32')
x = convert(np.ndarray, ['1', '2', '3'], dshape=ds)
assert convert(list, x) == [1, 2, 3]
def test_numpy_asserts_type_after_dataframe():
df = pd.DataFrame({'name': ['Alice'], 'amount': [100]})
ds = datashape.dshape('1 * {name: string[10, "ascii"], amount: int32}')
x = convert(np.ndarray, df, dshape=ds)
assert discover(x) == ds
def test_list_to_dataframe_without_datashape():
data = [('Alice', 100), ('Bob', 200)]
df = convert(pd.DataFrame, data)
assert isinstance(df, pd.DataFrame)
assert list(df.columns) != ['Alice', 100]
assert convert(list, df) == data
def test_noop():
assert convert(list, [1, 2, 3]) == [1, 2, 3]
def test_generator_is_iterator():
g = (1 for i in range(3))
L = convert(list, g)
assert L == [1, 1, 1]
def test_list_of_lists_to_set_creates_tuples():
assert convert(set, [[1], [2]]) == set([(1,), (2,)])
def test_list_of_strings_to_set():
assert convert(set, ['Alice', 'Bob']) == set(['Alice', 'Bob'])
def test_datetimes_persist():
typs = [list, tuple, np.ndarray, tuple]
L = [datetime.datetime.now()] * 3
ds = discover(L)
x = L
for cls in typs:
x = convert(cls, x)
assert discover(x) == ds
def test_numpy_to_list_preserves_ns_datetimes():
x = np.array([(0, 0)], dtype=[('a', 'M8[ns]'), ('b', 'i4')])
assert convert(list, x) == [(datetime.datetime(1970, 1, 1, 0, 0), 0)]
def test_numpy_to_chunks_numpy():
x = np.arange(100)
c = numpy_to_chunks_numpy(x, chunksize=10)
assert isinstance(c, chunks(np.ndarray))
assert len(list(c)) == 10
assert eq(list(c)[0], x[:10])
def test_pandas_and_chunks_pandas():
df = pd.DataFrame({'a': [1, 2, 3, 4], 'b': [1., 2., 3., 4.]})
c = dataframe_to_chunks_dataframe(df, chunksize=2)
assert isinstance(c, chunks(pd.DataFrame))
assert len(list(c)) == 2
df2 = chunks_dataframe_to_dataframe(c)
tm.assert_frame_equal(df, df2)
def test_iterator_to_DataFrame_chunks():
data = ((0, 1), (2, 3), (4, 5), (6, 7))
df1 = pd.DataFrame(list(data))
df2 = iterator_to_DataFrame_chunks(data, chunksize=2, add_index=True)
df2 = pd.concat(df2, axis=0)
tm.assert_frame_equal(df1, df2)
df2 = convert(pd.DataFrame, data, chunksize=2, add_index=True)
tm.assert_almost_equal(df1, df2)
def test_recarray():
data = np.array([(1, 1.), (2, 2.)], dtype=[('a', 'i4'), ('b', 'f4')])
result = convert(np.recarray, data)
assert isinstance(result, np.recarray)
assert eq(result.a, data['a'])
result2 = convert(np.ndarray, data)
assert not isinstance(result2, np.recarray)
assert eq(result2, data)
def test_empty_iterator_to_chunks_dataframe():
ds = dshape('var * {x: int}')
result = convert(chunks(pd.DataFrame), iter([]), dshape=ds)
data = convert(pd.DataFrame, result)
assert isinstance(data, pd.DataFrame)
assert list(data.columns) == ['x']
def test_empty_iterator_to_chunks_ndarray():
ds = dshape('var * {x: int}')
result = convert(chunks(np.ndarray), iter([]), dshape=ds)
data = convert(np.ndarray, result)
assert isinstance(data, np.ndarray)
assert len(data) == 0
assert data.dtype.names == ('x',)
def test_chunks_of_lists_and_iterators():
L = [1, 2], [3, 4]
cl = chunks(list)(L)
assert convert(list, cl) == [1, 2, 3, 4]
assert list(convert(Iterator, cl)) == [1, 2, 3, 4]
assert len(list(convert(chunks(Iterator), cl))) == 2
def test_ndarray_to_df_preserves_field_names():
ds = dshape('2 * {a: int, b: int}')
arr = np.array([[0, 1], [2, 3]])
# dshape explicitly sets field names.
assert (convert(pd.DataFrame, arr, dshape=ds).columns == ['a', 'b']).all()
# no dshape is passed.
assert (convert(pd.DataFrame, arr).columns == [0, 1]).all()
def test_iterator_to_df():
ds = dshape('var * int32')
it = iter([1, 2, 3])
df = convert(pd.DataFrame, it, dshape=ds)
assert df[0].tolist() == [1, 2, 3]
it = iter([1, 2, 3])
df = convert(pd.DataFrame, it, dshape=None)
assert df[0].tolist() == [1, 2, 3]
it = iter([1, 2, 3])
df = convert(pd.DataFrame, it)
assert df[0].tolist() == [1, 2, 3]
| {
"repo_name": "quantopian/odo",
"path": "odo/tests/test_convert.py",
"copies": "4",
"size": "9291",
"license": "bsd-3-clause",
"hash": -577210499226597100,
"line_mean": 29.7649006623,
"line_max": 78,
"alpha_frac": 0.5849747067,
"autogenerated": false,
"ratio": 3.0214634146341464,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5606438121334146,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import pytest
from operator import (add, sub, mul, floordiv, mod, pow, truediv, eq, ne, lt,
gt, le, ge, getitem)
from functools import partial
from datetime import datetime
from datashape.util.testing import assert_dshape_equal
from datashape.predicates import iscollection, isscalar
from blaze.expr import (
Field,
by,
common_subexpression,
cos,
count, # used in eval string
discover,
distinct,
exp,
join,
label, # used in eval string
isnan, # used in eval string
max,
merge,
min,
projection,
selection,
sum,
summary,
symbol,
transform,
)
from blaze.compatibility import PY3, builtins
from blaze.utils import raises
from datashape import dshape, int32, int64, Record, DataShape
from toolz import identity
def test_dshape():
t = symbol('t', 'var * {name: string, amount: int}')
assert t.dshape == dshape('var * {name: string, amount: int}')
def test_length():
t = symbol('t', '10 * {name: string, amount: int}')
s = symbol('s', 'var * {name:string, amount:int}')
assert t.dshape == dshape('10 * {name: string, amount: int}')
assert len(t) == 10
assert len(t.name) == 10
assert len(t[['name']]) == 10
assert len(t.sort('name')) == 10
assert len(t.head(5)) == 5
assert len(t.head(50)) == 10
with pytest.raises(ValueError):
len(s)
def test_symbol_eq():
assert not (symbol('t', 'var * {name: string}') ==
symbol('v', 'var * {name: string}'))
def test_symbol_name():
t = symbol('t', '10 * {people: string, amount: int}')
r = symbol('r', 'var * int64')
with pytest.raises(AttributeError):
t.name
with pytest.raises(AttributeError):
r.name
def test_shape():
t = symbol('t', 'var * {name: string, amount: int}')
assert t.shape
assert isinstance(t.shape, tuple)
assert len(t.shape) == 1
def test_eq():
assert symbol('t', 'var * {a: string, b: int}').isidentical(
symbol('t', 'var * {a: string, b: int}'),
)
assert not symbol('t', 'var * {b: string, a: int}').isidentical(
symbol('t', 'var * {a: string, b: int}'),
)
def test_column():
t = symbol('t', 'var * {name: string, amount: int}')
assert t.fields == ['name', 'amount']
assert eval(str(t.name)) == t.name
assert str(t.name) == "t.name"
with pytest.raises(AttributeError):
t.name.balance
with pytest.raises((NotImplementedError, ValueError)):
getitem(t, set('balance'))
def test_symbol_projection_failures():
t = symbol('t', '10 * {name: string, amount: int}')
with pytest.raises(ValueError):
t._project(['name', 'id'])
with pytest.raises(AttributeError):
t.foo
with pytest.raises(TypeError):
t._project(t.dshape)
def test_Projection():
t = symbol('t', 'var * {name: string, amount: int, id: int32}')
p = projection(t, ['amount', 'name'])
assert p.schema == dshape('{amount: int32, name: string}')
print(t['amount'].dshape)
print(dshape('var * int32'))
assert t['amount'].dshape == dshape('var * int32')
assert t['amount']._name == 'amount'
assert eval(str(p)).isidentical(p)
assert p._project(['amount', 'name']) == p[['amount', 'name']]
with pytest.raises(ValueError):
p._project('balance')
def test_Projection_retains_shape():
t = symbol('t', '5 * {name: string, amount: int, id: int32}')
assert_dshape_equal(
t[['name', 'amount']].dshape,
dshape('5 * {name: string, amount: int}')
)
def test_indexing():
t = symbol('t', 'var * {name: string, amount: int, id: int}')
assert t[['amount', 'id']] == projection(t, ['amount', 'id'])
assert t['amount'].isidentical(Field(t, 'amount'))
def test_relational():
t = symbol('t', 'var * {name: string, amount: int, id: int}')
r = (t['name'] == 'Alice')
assert 'bool' in str(r.dshape)
assert r._name
def test_selection():
t = symbol('t', 'var * {name: string, amount: int, id: int}')
s = selection(t, t['name'] == 'Alice')
f = selection(t, t['id'] > t['amount'])
p = t[t['amount'] > 100]
with pytest.raises(ValueError):
selection(t, p)
assert s.dshape == t.dshape
def test_selection_typecheck():
t = symbol('t', 'var * {name: string, amount: int, id: int}')
assert raises(TypeError, lambda: t[t['amount'] + t['id']])
assert raises(TypeError, lambda: t[t['name']])
def test_selection_by_indexing():
t = symbol('t', 'var * {name: string, amount: int, id: int}')
result = t[t['name'] == 'Alice']
assert t.schema == result.schema
assert 'Alice' in str(result)
def test_selection_by_getattr():
t = symbol('t', 'var * {name: string, amount: int, id: int}')
result = t[t.name == 'Alice']
assert t.schema == result.schema
assert 'Alice' in str(result)
def test_selection_path_check():
t = symbol('t', 'var * {name: string, amount: int, id: int}')
t2 = t[t.name == 'Alice']
t3 = t2[t2.amount > 0]
def test_path_issue():
t = symbol('t', "var * {topic: string, word: string, result: ?float64}")
t2 = transform(t, sizes=t.result.map(lambda x: (x - MIN)*10/(MAX - MIN),
schema='float64', name='size'))
assert builtins.any(t2.sizes.isidentical(node) for node in t2.args)
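# Note: MIN and MAX inside the lambda above are unbound names. That is fine
# here because the test only builds and inspects the expression graph; the
# mapped function is never actually called, so its body is never evaluated.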
def test_getattr_doesnt_override_properties():
t = symbol('t', 'var * {_subs: string, schema: string}')
assert callable(t._subs)
assert isinstance(t.schema, DataShape)
def test_dir_contains_columns():
t = symbol('t', 'var * {name: string, amount: int, id: int}')
result = dir(t)
columns_set = set(t.fields)
assert set(result) & columns_set == columns_set
def test_selection_consistent_children():
t = symbol('t', 'var * {name: string, amount: int, id: int}')
expr = t['name'][t['amount'] < 0]
assert list(expr.fields) == ['name']
def test_str():
import re
t = symbol('t', 'var * {name: string, amount: int, id: int}')
expr = t[t['amount'] < 0]['id'] * 2
assert '<class' not in str(expr)
assert not re.search('0x[0-9a-f]+', str(expr))
assert eval(str(expr)) == expr
assert '*' in str(expr)
def test_join():
t = symbol('t', 'var * {name: string, amount: int}')
s = symbol('t', 'var * {name: string, id: int}')
r = symbol('r', 'var * {name: string, amount: int}')
q = symbol('q', 'var * {name: int}')
j = join(t, s, 'name', 'name')
assert j.schema == dshape('{name: string, amount: int, id: int}')
assert join(t, s, 'name') == join(t, s, 'name')
assert join(t, s, 'name').on_left == 'name'
assert join(t, s, 'name').on_right == 'name'
assert join(t, r, ('name', 'amount')).on_left == ['name', 'amount']
with pytest.raises(TypeError):
join(t, q, 'name')
with pytest.raises(ValueError):
join(t, s, how='upside_down')
def test_join_different_on_right_left_columns():
t = symbol('t', 'var * {x: int, y: int}')
s = symbol('t', 'var * {a: int, b: int}')
j = join(t, s, 'x', 'a')
assert j.on_left == 'x'
assert j.on_right == 'a'
def test_joined_column_first_in_schema():
t = symbol('t', 'var * {x: int, y: int, z: int}')
s = symbol('s', 'var * {w: int, y: int}')
assert join(t, s).schema == dshape('{y: int, x: int, z: int, w: int}')
def test_outer_join():
t = symbol('t', 'var * {name: string, amount: int}')
s = symbol('t', 'var * {name: string, id: int}')
jleft = join(t, s, 'name', 'name', how='left')
jright = join(t, s, 'name', 'name', how='right')
jinner = join(t, s, 'name', 'name', how='inner')
jouter = join(t, s, 'name', 'name', how='outer')
js = [jleft, jright, jinner, jouter]
assert len(set(js)) == 4 # not equal
assert jinner.schema == dshape('{name: string, amount: int, id: int}')
assert jleft.schema == dshape('{name: string, amount: int, id: ?int}')
assert jright.schema == dshape('{name: string, amount: ?int, id: int}')
assert jouter.schema == dshape('{name: string, amount: ?int, id: ?int}')
# Default behavior
assert (join(t, s, 'name', 'name', how='inner') ==
join(t, s, 'name', 'name'))
def test_join_default_shared_columns():
t = symbol('t', 'var * {name: string, amount: int}')
s = symbol('t', 'var * {name: string, id: int}')
assert join(t, s) == join(t, s, 'name', 'name')
def test_multi_column_join():
a = symbol('a', 'var * {x: int, y: int, z: int}')
b = symbol('b', 'var * {w: int, x: int, y: int}')
j = join(a, b, ['x', 'y'])
assert set(j.fields) == set('wxyz')
assert j.on_left == j.on_right == ['x', 'y']
assert hash(j)
assert j.fields == ['x', 'y', 'z', 'w']
def test_traverse():
t = symbol('t', 'var * {name: string, amount: int}')
assert t in list(t._traverse())
expr = t.amount.sum()
trav = list(expr._traverse())
assert builtins.any(t.amount.isidentical(x) for x in trav)
def test_unary_ops():
t = symbol('t', 'var * {name: string, amount: int}')
expr = cos(exp(t['amount']))
assert 'cos' in str(expr)
assert '~' in str(~(t.amount > 0))
def test_reduction():
t = symbol('t', 'var * {name: string, amount: int32}')
r = sum(t['amount'])
assert r.dshape in (dshape('int64'),
dshape('{amount: int64}'),
dshape('{amount_sum: int64}'))
assert 'amount' not in str(t.count().dshape)
assert t.count().dshape[0] in (int32, int64)
assert 'int' in str(t.count().dshape)
assert 'int' in str(t.nunique().dshape)
assert 'string' in str(t['name'].max().dshape)
assert 'string' in str(t['name'].min().dshape)
assert 'string' not in str(t.count().dshape)
t = symbol('t', 'var * {name: string, amount: real, id: int}')
assert 'int' in str(t['id'].sum().dshape)
assert 'int' not in str(t['amount'].sum().dshape)
def test_reduction_name():
t = symbol('t', 'var * {name: string, amount: int32, id: int32}')
assert (t.amount + t.id).sum()._name
def test_max_min_class():
t = symbol('t', 'var * {name: string, amount: int32}')
assert str(max(t).dshape) == '{name: string, amount: int32}'
assert str(min(t).dshape) == '{name: string, amount: int32}'
@pytest.fixture
def symsum():
t = symbol('t', 'var * {name: string, amount: int32}')
return t, t.amount.sum()
@pytest.fixture
def ds():
return dshape("var * {"
"transaction_key : int64,"
"user_from_key : int64,"
"user_to_key : int64,"
"date : int64,"
"value : float64"
"}")
def test_discover_dshape_symbol(ds):
t_ds = symbol('t', dshape=ds)
assert t_ds.fields is not None
class TestScalarArithmetic(object):
ops = {'+': add, '-': sub, '*': mul, '/': truediv, '//': floordiv, '%': mod,
'**': pow, '==': eq, '!=': ne, '<': lt, '>': gt, '<=': le, '>=': ge}
def test_scalar_arith(self, symsum):
def runner(f):
result = f(r, 1)
assert eval('r %s 1' % op).isidentical(result)
a = f(r, r)
b = eval('r %s r' % op)
assert a is b or a.isidentical(b)
result = f(1, r)
assert eval('1 %s r' % op).isidentical(result)
t, r = symsum
r = t.amount.sum()
for op, f in self.ops.items():
runner(f)
def test_scalar_usub(self, symsum):
t, r = symsum
result = -r
assert eval(str(result)).isidentical(result)
@pytest.mark.xfail
def test_scalar_uadd(self, symsum):
t, r = symsum
+r
def test_summary():
t = symbol('t', 'var * {id: int32, name: string, amount: int32}')
s = summary(total=t.amount.sum(), num=t.id.count())
assert s.dshape == dshape('{num: int32, total: int64}')
assert hash(s)
assert eval(str(s)).isidentical(s)
assert 'summary(' in str(s)
assert 'total=' in str(s)
assert 'num=' in str(s)
assert str(t.amount.sum()) in str(s)
assert not summary(total=t.amount.sum())._child.isidentical(t.amount.sum())
assert iscollection(summary(total=t.amount.sum() + 1)._child.dshape)
def test_reduction_arithmetic():
t = symbol('t', 'var * {id: int32, name: string, amount: int32}')
expr = t.amount.sum() + 1
assert eval(str(expr)).isidentical(expr)
def test_Distinct():
t = symbol('t', 'var * {name: string, amount: int32}')
r = distinct(t['name'])
print(r.dshape)
assert r.dshape == dshape('var * string')
assert r._name == 'name'
r = t.distinct()
assert r.dshape == t.dshape
def test_by():
t = symbol('t', 'var * {name: string, amount: int32, id: int32}')
r = by(t['name'], total=sum(t['amount']))
print(r.schema)
assert isinstance(r.schema[0], Record)
assert str(r.schema[0]['name']) == 'string'
def test_by_summary():
t = symbol('t', 'var * {name: string, amount: int32, id: int32}')
a = by(t['name'], sum=sum(t['amount']))
b = by(t['name'], summary(sum=sum(t['amount'])))
assert a.isidentical(b)
def test_by_summary_printing():
t = symbol('t', 'var * {name: string, amount: int32, id: int32}')
assert (str(by(t.name, total=sum(t.amount))) ==
'by(t.name, total=sum(t.amount))')
def test_by_columns():
t = symbol('t', 'var * {name: string, amount: int32, id: int32}')
assert len(by(t['id'], total=t['amount'].sum()).fields) == 2
assert len(by(t['id'], count=t['id'].count()).fields) == 2
print(by(t, count=t.count()).fields)
assert len(by(t, count=t.count()).fields) == 4
def test_sort():
t = symbol('t', 'var * {name: string, amount: int32, id: int32}')
s = t.sort('amount', ascending=True)
print(str(s))
assert eval(str(s)).isidentical(s)
assert s.schema == t.schema
assert t['amount'].sort().key == 'amount'
def test_head():
t = symbol('t', 'var * {name: string, amount: int32, id: int32}')
s = t.head(10)
assert eval(str(s)).isidentical(s)
assert s.schema == t.schema
def test_label():
t = symbol('t', 'var * {name: string, amount: int32, id: int32}')
quantity = (t['amount'] + 100).label('quantity')
assert eval(str(quantity)).isidentical(quantity)
assert quantity.fields == ['quantity']
with pytest.raises(ValueError):
quantity['balance']
def test_map_label():
t = symbol('t', 'var * {name: string, amount: int32, id: int32}')
c = t.amount.map(identity, schema='int32')
assert c.label('bar')._name == 'bar'
assert c.label('bar')._child.isidentical(c._child)
def test_columns():
t = symbol('t', 'var * {name: string, amount: int32, id: int32}')
assert list(t.fields) == ['name', 'amount', 'id']
assert list(t['name'].fields) == ['name']
(t['amount'] + 1).fields
def test_relabel():
t = symbol('t', 'var * {name: string, amount: int32, id: int32}')
rl = t.relabel({'name': 'NAME', 'id': 'ID'})
rlc = t['amount'].relabel({'amount': 'BALANCE'})
assert eval(str(rl)).isidentical(rl)
print(rl.fields)
assert rl.fields == ['NAME', 'amount', 'ID']
assert not isscalar(rl.dshape.measure)
assert isscalar(rlc.dshape.measure)
def test_relabel_join():
names = symbol('names', 'var * {first: string, last: string}')
siblings = join(names.relabel({'last': 'left'}),
names.relabel({'last': 'right'}), 'first')
assert siblings.fields == ['first', 'left', 'right']
def test_map():
t = symbol('t', 'var * {name: string, amount: int32, id: int32}')
inc = lambda x: x + 1
assert isscalar(t['amount'].map(inc, schema='int').dshape.measure)
s = t['amount'].map(inc, schema='{amount: int}')
assert not isscalar(s.dshape.measure)
assert s.dshape == dshape('var * {amount: int}')
expr = (t[['name', 'amount']]
.map(identity, schema='{name: string, amount: int}'))
assert expr._name is None
@pytest.mark.xfail(reason="Not sure that we should even support this")
def test_map_without_any_info():
t = symbol('t', 'var * {name: string, amount: int32, id: int32}')
assert iscolumn(t['amount'].map(inc, 'int'))
assert not iscolumn(t[['name', 'amount']].map(identity))
def test_apply():
t = symbol('t', 'var * {name: string, amount: int32, id: int32}')
s = t['amount'].apply(sum, dshape='real')
r = t['amount'].apply(sum, dshape='3 * real')
assert s.dshape == dshape('real')
assert r.schema == dshape('real')
def test_symbol_printing_is_legible():
accounts = symbol('accounts',
'var * {name: string, balance: int, id: int}')
expr = (exp(accounts.balance * 10)) + accounts['id']
assert "exp(accounts.balance * 10)" in str(expr)
assert "+ accounts.id" in str(expr)
def test_merge():
t = symbol('t', 'int64')
p = symbol('p', 'var * {amount: int}')
accounts = symbol('accounts',
'var * {name: string, balance: int32, id: int32}')
new_amount = (accounts.balance * 1.5).label('new')
c = merge(accounts[['name', 'balance']], new_amount)
assert c.fields == ['name', 'balance', 'new']
assert c.schema == dshape('{name: string, balance: int32, new: float64}')
d = merge(t, p)
assert d.fields == ['t', 'amount']
assert_dshape_equal(d.dshape, dshape('var * {t: int64, amount: int}'))
with pytest.raises(TypeError) as e:
merge(t, t)
assert str(e.value) == 'cannot merge all scalar expressions'
def test_merge_repeats():
accounts = symbol('accounts',
'var * {name: string, balance: int32, id: int32}')
with pytest.raises(ValueError):
merge(accounts, (accounts.balance + 1).label('balance'))
def test_merge_project():
accounts = symbol('accounts',
'var * {name: string, balance: int32, id: int32}')
new_amount = (accounts['balance'] * 1.5).label('new')
c = merge(accounts[['name', 'balance']], new_amount)
assert c['new'].isidentical(new_amount)
assert c['name'].isidentical(accounts['name'])
assert c[['name', 'new']].isidentical(merge(accounts.name, new_amount))
inc = lambda x: x + 1
def test_subterms():
a = symbol('a', 'var * {x: int, y: int, z: int}')
assert list(a._subterms()) == [a]
assert set(a['x']._subterms()) == set([a, a['x']])
assert (set(a['x'].map(inc, 'int')._subterms()) ==
set([a, a['x'], a['x'].map(inc, 'int')]))
assert a in set((a['x'] + 1)._subterms())
def test_common_subexpression():
a = symbol('a', 'var * {x: int, y: int, z: int}')
assert common_subexpression(a).isidentical(a)
assert common_subexpression(a, a['x']).isidentical(a)
assert common_subexpression(a['y'] + 1, a['x']).isidentical(a)
assert common_subexpression(a['x'].map(inc, 'int'), a['x']).isidentical(
a['x'],
)
def test_schema_of_complex_interaction():
a = symbol('a', 'var * {x: int, y: int, z: int}')
expr = (a['x'] + a['y']) / a['z']
assert expr.schema == dshape('float64')
expr = expr.label('foo')
assert expr.schema == dshape('float64')
def iscolumn(x):
return isscalar(x.dshape.measure)
def test_iscolumn():
a = symbol('a', 'var * {x: int, y: int, z: int}')
assert not iscolumn(a)
assert iscolumn(a['x'])
assert not iscolumn(a[['x', 'y']])
assert not iscolumn(a[['x']])
assert iscolumn((a['x'] + a['y']))
assert iscolumn(a['x'].distinct())
assert not iscolumn(a[['x']].distinct())
assert not iscolumn(by(a['x'], total=a['y'].sum()))
assert iscolumn(a['x'][a['x'] > 1])
assert not iscolumn(a[['x', 'y']][a['x'] > 1])
assert iscolumn(a['x'].sort())
assert not iscolumn(a[['x', 'y']].sort())
assert iscolumn(a['x'].head())
assert not iscolumn(a[['x', 'y']].head())
assert iscolumn(symbol('b', 'int'))
assert not iscolumn(symbol('b', 'var * {x: int}'))
def test_discover():
ds = 'var * {x: int, y: int, z: int}'
a = symbol('a', ds)
assert discover(a) == dshape(ds)
def test_improper_selection():
t = symbol('t', 'var * {x: int, y: int, z: int}')
assert raises(Exception, lambda: t[t['x'] > 0][t.sort()[t['y' > 0]]])
def test_serializable():
t = symbol('t', 'var * {id: int, name: string, amount: int}')
import pickle
t2 = pickle.loads(pickle.dumps(t, protocol=pickle.HIGHEST_PROTOCOL))
assert t.isidentical(t2)
s = symbol('t', 'var * {id: int, city: string}')
expr = join(t[t.amount < 0], s).sort('id').city.head()
expr2 = pickle.loads(pickle.dumps(expr, protocol=pickle.HIGHEST_PROTOCOL))
assert expr.isidentical(expr2)
def test_symbol_coercion():
from datetime import date
t = symbol('t', 'var * {name: string, amount: int, timestamp: ?date}')
assert (t.amount + '10').rhs == 10
assert (t.timestamp < '2014-12-01').rhs == date(2014, 12, 1)
def test_isnan():
t = symbol('t', 'var * {name: string, amount: real, timestamp: ?date}')
for expr in [t.amount.isnan(), ~t.amount.isnan()]:
assert eval(str(expr)).isidentical(expr)
assert iscollection(t.amount.isnan().dshape)
assert 'bool' in str(t.amount.isnan().dshape)
def test_distinct_name():
t = symbol('t', 'var * {id: int32, name: string}')
assert t.name.isidentical(t['name'])
assert t.distinct().name.isidentical(t.distinct()['name'])
assert t.id.distinct()._name == 'id'
assert t.name._name == 'name'
def test_leaves():
t = symbol('t', 'var * {id: int32, name: string}')
v = symbol('v', 'var * {id: int32, city: string}')
x = symbol('x', 'int32')
assert t._leaves() == [t]
assert t.id._leaves() == [t]
assert by(t.name, count=t.id.nunique())._leaves() == [t]
assert join(t, v)._leaves() == [t, v]
assert join(v, t)._leaves() == [v, t]
assert (x + 1)._leaves() == [x]
@pytest.fixture
def t():
return symbol('t', 'var * {id: int, amount: float64, name: string}')
def funcname(x, y='<lambda>'):
if PY3:
return 'TestRepr.%s.<locals>.%s' % (x, y)
return 'test_symbol.%s' % y
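# funcname papers over the Python 2/3 difference in how nested functions show
# up in reprs: on Python 3 a function defined inside a TestRepr method has a
# qualified name like 'TestRepr.test_udf.<locals>.myfunc', while Python 2 only
# reports the module-level form. For example, assuming PY3 is true:
#
#     funcname('test_udf', 'myfunc')   # -> 'TestRepr.test_udf.<locals>.myfunc'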
class TestRepr(object):
def test_partial_lambda(self, t):
expr = t.amount.map(partial(lambda x, y: x + y, 1))
s = str(expr)
assert s == ("Map(_child=t.amount, "
"func=partial(%s, 1), "
"_asschema=None, _name0=None)" %
funcname('test_partial_lambda'))
def test_lambda(self, t):
expr = t.amount.map(lambda x: x)
s = str(expr)
assert s == ("Map(_child=t.amount, "
"func=%s, _asschema=None, _name0=None)" %
funcname('test_lambda'))
def test_partial(self, t):
def myfunc(x, y):
return x + y
expr = t.amount.map(partial(myfunc, 1))
s = str(expr)
assert s == ("Map(_child=t.amount, "
"func=partial(%s, 1), "
"_asschema=None, _name0=None)" % funcname('test_partial',
'myfunc'))
def test_builtin(self, t):
expr = t.amount.map(datetime.fromtimestamp)
s = str(expr)
assert s == ("Map(_child=t.amount, "
"func=datetime.fromtimestamp, _asschema=None,"
" _name0=None)")
def test_udf(self, t):
def myfunc(x):
return x + 1
expr = t.amount.map(myfunc)
s = str(expr)
assert s == ("Map(_child=t.amount, "
"func=%s, _asschema=None,"
" _name0=None)" % funcname('test_udf', 'myfunc'))
def test_nested_partial(self, t):
def myfunc(x, y, z):
return x + y + z
f = partial(partial(myfunc, 2), 1)
expr = t.amount.map(f)
s = str(expr)
fn = funcname('test_nested_partial', 'myfunc')
assert s in ("Map(_child=t.amount, func=partial(partial(%s, 2), 1),"
" _asschema=None, _name0=None)" % fn,
# Py3.5 version does partial() folding!
"Map(_child=t.amount, func=partial(%s, 2, 1),"
" _asschema=None, _name0=None)" % fn)
def test_count_values():
t = symbol('t', 'var * {name: string, amount: int, city: string}')
assert t.name.count_values(sort=False).isidentical(
by(t.name, count=t.name.count()),
)
assert t.name.count_values(sort=True).isidentical(
by(t.name, count=t.name.count()).sort('count', ascending=False),
)
def test_dir():
t = symbol('t', 'var * {name: string, amount: int, dt: datetime}')
assert 'day' in dir(t.dt)
assert 'mean' not in dir(t.dt)
assert 'mean' in dir(t.amount)
assert 'like' not in dir(t[['amount', 'dt']])
assert 'any' not in dir(t.name)
def test_distinct_column():
t = symbol('t', 'var * {name: string, amount: int, dt: datetime}')
assert t.name.distinct().name.dshape == t.name.distinct().dshape
assert t.name.distinct().name.isidentical(t.name.distinct())
| {
"repo_name": "ContinuumIO/blaze",
"path": "blaze/expr/tests/test_symbol.py",
"copies": "3",
"size": "25185",
"license": "bsd-3-clause",
"hash": -857298987975311000,
"line_mean": 28.9109263658,
"line_max": 80,
"alpha_frac": 0.5617629541,
"autogenerated": false,
"ratio": 3.1115641215715346,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5173327075671534,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import pytest
from sample_pb2 import MessageOfTypes
from mentor.protobuf import dict_to_protobuf, protobuf_to_dict
@pytest.fixture
def m():
m = MessageOfTypes()
m.dubl = 1.7e+308
m.flot = 3.4e+038
m.i32 = 2 ** 31 - 1 # 2147483647 #
m.i64 = 2 ** 63 - 1 # 0x7FFFFFFFFFFFFFFF
m.ui32 = 2 ** 32 - 1
m.ui64 = 2 ** 64 - 1
m.si32 = -1 * m.i32
m.si64 = -1 * m.i64
m.f32 = m.i32
m.f64 = m.i64
m.sf32 = m.si32
m.sf64 = m.si64
m.bol = True
m.strng = "string"
m.byts = b'\n\x14\x1e'
assert len(m.byts) == 3, len(m.byts)
m.nested.req = "req"
m.enm = MessageOfTypes.C # @UndefinedVariable
m.enmRepeated.extend([MessageOfTypes.A, MessageOfTypes.C])
m.range.extend(range(10))
return m
def compare(m, d, exclude=None):
i = 0
exclude = ['byts', 'nested', 'enm', 'enmRepeated'] + (exclude or [])
for i, field in enumerate(MessageOfTypes.DESCRIPTOR.fields): # @UndefinedVariable
if field.name not in exclude:
assert field.name in d, field.name
assert d[field.name] == getattr(
m, field.name), (field.name, d[field.name])
assert i > 0
assert m.byts == str(d['byts'])
assert d['nested'] == {'req': m.nested.req}
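# compare() is a helper rather than a test: it walks every field in the
# message descriptor, skipping fields whose dict form differs from the raw
# protobuf attribute (bytes, nested messages and enums, which are checked
# separately), and the trailing ``assert i > 0`` is a sanity check that the
# descriptor actually yielded fields to compare.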
def test_basics(m):
d = protobuf_to_dict(m)
compare(m, d, ['nestedRepeated'])
m2 = dict_to_protobuf(d, MessageOfTypes)
assert m == m2
def test_use_enum_labels(m):
d = protobuf_to_dict(m)
compare(m, d, ['nestedRepeated'])
assert d['enm'] == 'C'
assert d['enmRepeated'] == ['A', 'C']
m2 = dict_to_protobuf(d, MessageOfTypes)
assert m == m2
d['enm'] = 'MEOW'
with pytest.raises(KeyError):
dict_to_protobuf(d, MessageOfTypes)
d['enm'] = 'A'
d['enmRepeated'] = ['B']
dict_to_protobuf(d, MessageOfTypes)
d['enmRepeated'] = ['CAT']
with pytest.raises(KeyError):
dict_to_protobuf(d, MessageOfTypes)
def test_repeated_enum(m):
d = protobuf_to_dict(m)
compare(m, d, ['nestedRepeated'])
assert d['enmRepeated'] == ['A', 'C']
m2 = dict_to_protobuf(d, MessageOfTypes)
assert m == m2
d['enmRepeated'] = ['MEOW']
with pytest.raises(KeyError):
dict_to_protobuf(d, MessageOfTypes)
def test_nested_repeated(m):
m.nestedRepeated.extend(
[MessageOfTypes.NestedType(req=str(i)) for i in range(10)])
d = protobuf_to_dict(m)
compare(m, d, exclude=['nestedRepeated'])
assert d['nestedRepeated'] == [{'req': str(i)} for i in range(10)]
m2 = dict_to_protobuf(d, MessageOfTypes)
assert m == m2
def test_reverse(m):
m2 = dict_to_protobuf(protobuf_to_dict(m), MessageOfTypes)
assert m == m2
m2.dubl = 0
assert m2 != m
def test_incomplete(m):
d = protobuf_to_dict(m)
d.pop('dubl')
m2 = dict_to_protobuf(d, MessageOfTypes)
assert m2.dubl == 0
assert m != m2
def test_non_strict(m):
d = protobuf_to_dict(m)
d['non_existing_field'] = 'data'
d['temporary_field'] = 'helping_state'
with pytest.raises(KeyError):
dict_to_protobuf(d, MessageOfTypes)
m2 = dict_to_protobuf(d, MessageOfTypes, strict=False)
with pytest.raises(AttributeError):
m2.temporary_field
def test_pass_instance(m):
d = protobuf_to_dict(m)
d['dubl'] = 1
m2 = dict_to_protobuf(d, m)
assert m is m2
assert m.dubl == 1
def test_container_mapping(m):
class mapping(dict):
pass
containers = [(MessageOfTypes.NestedType(), mapping),
(MessageOfTypes(), dict)]
m.nestedRepeated.extend([MessageOfTypes.NestedType(req='1')])
d = protobuf_to_dict(m, containers=containers)
m = dict_to_protobuf(d, containers=containers)
assert isinstance(d, dict)
assert isinstance(d['nested'], mapping)
assert isinstance(m, MessageOfTypes)
assert isinstance(m.nested, MessageOfTypes.NestedType)
def test_conditional_container_mapping(m):
class truedict(dict):
pass
class falsedict(dict):
pass
containers = [(MessageOfTypes(bol=True), truedict),
(MessageOfTypes(bol=False), falsedict),
(MessageOfTypes.NestedType(), dict)]
m.bol = True
d = protobuf_to_dict(m, containers=containers)
p = dict_to_protobuf(d, containers=containers)
assert isinstance(d, truedict)
assert isinstance(p, MessageOfTypes)
m.bol = False
d = protobuf_to_dict(m, containers=containers)
p = dict_to_protobuf(d, containers=containers)
assert isinstance(d, falsedict)
assert isinstance(p, MessageOfTypes)
def test_reverse_type_conversion(m):
class String(object):
def __init__(self, value):
self.value = value
def __str__(self):
return self.value
d = protobuf_to_dict(m)
d['strng'] = String('string')
m2 = dict_to_protobuf(d, MessageOfTypes)
assert m == m2
| {
"repo_name": "lensacom/satyr",
"path": "mentor/tests/test_protobuf.py",
"copies": "1",
"size": "4976",
"license": "apache-2.0",
"hash": 7564918650586295000,
"line_mean": 24.6494845361,
"line_max": 86,
"alpha_frac": 0.6131430868,
"autogenerated": false,
"ratio": 3.113892365456821,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9226739208626059,
"avg_score": 0.000059248726152387724,
"num_lines": 194
} |
from __future__ import absolute_import, division, print_function
import pytest
from sample_pb2 import MessageOfTypes
from proxo import dict_to_protobuf, protobuf_to_dict
@pytest.fixture
def m():
m = MessageOfTypes()
m.dubl = 1.7e+308
m.flot = 3.4e+038
m.i32 = 2 ** 31 - 1 # 2147483647 #
m.i64 = 2 ** 63 - 1 # 0x7FFFFFFFFFFFFFFF
m.ui32 = 2 ** 32 - 1
m.ui64 = 2 ** 64 - 1
m.si32 = -1 * m.i32
m.si64 = -1 * m.i64
m.f32 = m.i32
m.f64 = m.i64
m.sf32 = m.si32
m.sf64 = m.si64
m.bol = True
m.strng = 'string'
m.byts = b'\n\x14\x1e'
assert len(m.byts) == 3, len(m.byts)
m.nested.req = 'req'
m.enm = MessageOfTypes.C # @UndefinedVariable
m.enmRepeated.extend([MessageOfTypes.A, MessageOfTypes.C])
m.range.extend(range(10))
return m
def compare(m, d, exclude=None):
i = 0
exclude = ['byts', 'nested', 'enm', 'enmRepeated'] + (exclude or [])
for i, field in enumerate(MessageOfTypes.DESCRIPTOR.fields): # @UndefinedVariable
if field.name not in exclude:
assert field.name in d, field.name
assert d[field.name] == getattr(
m, field.name), (field.name, d[field.name])
assert i > 0
assert m.byts == d['byts']
assert d['nested'] == {'req': m.nested.req}
def test_basics(m):
d = protobuf_to_dict(m)
compare(m, d, ['nestedRepeated'])
m2 = dict_to_protobuf(d, MessageOfTypes)
assert m == m2
def test_use_enum_labels(m):
d = protobuf_to_dict(m)
compare(m, d, ['nestedRepeated'])
assert d['enm'] == 'C'
assert d['enmRepeated'] == ['A', 'C']
m2 = dict_to_protobuf(d, MessageOfTypes)
assert m == m2
d['enm'] = 'MEOW'
with pytest.raises(KeyError):
dict_to_protobuf(d, MessageOfTypes)
d['enm'] = 'A'
d['enmRepeated'] = ['B']
dict_to_protobuf(d, MessageOfTypes)
d['enmRepeated'] = ['CAT']
with pytest.raises(KeyError):
dict_to_protobuf(d, MessageOfTypes)
def test_repeated_enum(m):
d = protobuf_to_dict(m)
compare(m, d, ['nestedRepeated'])
assert d['enmRepeated'] == ['A', 'C']
m2 = dict_to_protobuf(d, MessageOfTypes)
assert m == m2
d['enmRepeated'] = ['MEOW']
with pytest.raises(KeyError):
dict_to_protobuf(d, MessageOfTypes)
def test_nested_repeated(m):
m.nestedRepeated.extend(
[MessageOfTypes.NestedType(req=str(i)) for i in range(10)])
d = protobuf_to_dict(m)
compare(m, d, exclude=['nestedRepeated'])
assert d['nestedRepeated'] == [{'req': str(i)} for i in range(10)]
m2 = dict_to_protobuf(d, MessageOfTypes)
assert m == m2
def test_reverse(m):
m2 = dict_to_protobuf(protobuf_to_dict(m), MessageOfTypes)
assert m == m2
m2.dubl = 0
assert m2 != m
def test_incomplete(m):
d = protobuf_to_dict(m)
d.pop('dubl')
m2 = dict_to_protobuf(d, MessageOfTypes)
assert m2.dubl == 0
assert m != m2
def test_non_strict(m):
d = protobuf_to_dict(m)
d['non_existing_field'] = 'data'
d['temporary_field'] = 'helping_state'
with pytest.raises(KeyError):
dict_to_protobuf(d, MessageOfTypes)
m2 = dict_to_protobuf(d, MessageOfTypes, strict=False)
with pytest.raises(AttributeError):
m2.temporary_field
def test_pass_instance(m):
d = protobuf_to_dict(m)
d['dubl'] = 1
m2 = dict_to_protobuf(d, m)
assert m is m2
assert m.dubl == 1
def test_container_mapping(m):
class mapping(dict):
pass
containers = [(MessageOfTypes.NestedType(), mapping),
(MessageOfTypes(), dict)]
m.nestedRepeated.extend([MessageOfTypes.NestedType(req='1')])
d = protobuf_to_dict(m, containers=containers)
m = dict_to_protobuf(d, containers=containers)
assert isinstance(d, dict)
assert isinstance(d['nested'], mapping)
assert isinstance(m, MessageOfTypes)
assert isinstance(m.nested, MessageOfTypes.NestedType)
def test_conditional_container_mapping(m):
class truedict(dict):
pass
class falsedict(dict):
pass
containers = [(MessageOfTypes(bol=True), truedict),
(MessageOfTypes(bol=False), falsedict),
(MessageOfTypes.NestedType(), dict)]
m.bol = True
d = protobuf_to_dict(m, containers=containers)
p = dict_to_protobuf(d, containers=containers)
assert isinstance(d, truedict)
assert isinstance(p, MessageOfTypes)
m.bol = False
d = protobuf_to_dict(m, containers=containers)
p = dict_to_protobuf(d, containers=containers)
assert isinstance(d, falsedict)
assert isinstance(p, MessageOfTypes)
def test_reverse_type_conversion(m):
class String(object):
def __init__(self, value):
self.value = value
def __str__(self):
return self.value
d = protobuf_to_dict(m)
d['strng'] = String('string')
m2 = dict_to_protobuf(d, MessageOfTypes)
assert m == m2
| {
"repo_name": "kszucs/proxo",
"path": "proxo/tests/test_protobuf.py",
"copies": "1",
"size": "4961",
"license": "apache-2.0",
"hash": -705886448766327300,
"line_mean": 24.5721649485,
"line_max": 86,
"alpha_frac": 0.6125781093,
"autogenerated": false,
"ratio": 3.1142498430634022,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4226827952363402,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import pytest
import csv as csv_module
from blaze.data import CSV, JSON
import subprocess
import tempfile
import json
import os
from blaze.utils import filetext, tmpfile, raises
from blaze.compatibility import PY3, PY2
from datashape import discover, dshape
from blaze import drop, into, create_index
from blaze.utils import assert_allclose
from blaze.resource import resource
no_mongoimport = pytest.mark.skipif(raises(OSError,
lambda : subprocess.Popen('mongoimport',
shell=os.name != 'nt',
stdout=subprocess.PIPE).wait()),
reason='mongoimport cannot be found')
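# no_mongoimport is a reusable skip marker: the Popen call probes for the
# mongoimport binary when this module is imported, and raises() converts a
# missing binary (OSError) into True so decorated tests are skipped rather
# than erroring out. Usage sketch, mirroring the decorated tests below:
#
#     @no_mongoimport
#     def test_that_shells_out_to_mongoimport():
#         ...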
@pytest.yield_fixture(scope='module')
def conn():
pymongo = pytest.importorskip('pymongo')
try:
c = pymongo.MongoClient()
except pymongo.errors.ConnectionFailure:
pytest.skip('No mongo server running')
else:
yield c
c.close()
@pytest.fixture
def db(conn):
return conn.test_db
@pytest.fixture
def tuple_data():
return [(1, 2), (10, 20), (100, 200)]
@pytest.fixture
def openargs():
d = {'mode': 'wb' if PY2 else 'w'}
if PY3:
d['newline'] = ''
return d
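# The csv module expects files opened in binary mode on Python 2 but in text
# mode with newline='' on Python 3; the openargs fixture captures that
# difference so the CSV-writing fixtures below can simply do, for example:
#
#     with open(filename, **openargs) as f:
#         csv_module.writer(f).writerows(tuple_data)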
@pytest.yield_fixture
def file_name_colon(tuple_data, openargs):
with tmpfile('.csv') as filename:
with open(filename, **openargs) as f:
csv_module.writer(f, delimiter=':').writerows(tuple_data)
yield filename
@pytest.yield_fixture
def file_name(tuple_data, openargs):
with tmpfile('.csv') as filename:
with open(filename, **openargs) as f:
csv_module.writer(f).writerows(tuple_data)
yield filename
@pytest.yield_fixture
def empty_collec(db):
yield db.tmp_collection
db.tmp_collection.drop()
@pytest.fixture
def bank():
return [{'name': 'Alice', 'amount': 100},
{'name': 'Alice', 'amount': 200},
{'name': 'Bob', 'amount': 100},
{'name': 'Bob', 'amount': 200},
{'name': 'Bob', 'amount': 300}]
@pytest.yield_fixture
def bank_collec(db, bank):
coll = into(db.tmp_collection, bank)
yield coll
coll.drop()
def test_discover(bank_collec):
assert discover(bank_collec) == dshape('5 * {amount: int64, name: string}')
def test_into(empty_collec, bank):
lhs = set(into([], into(empty_collec, bank), columns=['name', 'amount']))
rhs = set([('Alice', 100), ('Alice', 200), ('Bob', 100), ('Bob', 200),
('Bob', 300)])
assert lhs == rhs
@pytest.yield_fixture
def mongo(db, bank):
db.tmp_collection.insert(bank)
yield db
db.tmp_collection.drop()
def test_drop(mongo):
drop(mongo.tmp_collection)
assert mongo.tmp_collection.count() == 0
@pytest.fixture
def bank_idx():
return [{'name': 'Alice', 'amount': 100, 'id': 1},
{'name': 'Alice', 'amount': 200, 'id': 2},
{'name': 'Bob', 'amount': 100, 'id': 3},
{'name': 'Bob', 'amount': 200, 'id': 4},
{'name': 'Bob', 'amount': 300, 'id': 5}]
@pytest.yield_fixture
def mongo_idx(db, bank_idx):
db.tmp_collection.insert(bank_idx)
yield db
db.tmp_collection.drop()
class TestCreateIndex(object):
def test_create_index(self, mongo_idx):
create_index(mongo_idx.tmp_collection, 'id')
assert 'id_1' in mongo_idx.tmp_collection.index_information()
def test_create_index_single_element_list(self, mongo_idx):
create_index(mongo_idx.tmp_collection, ['id'])
assert 'id_1' in mongo_idx.tmp_collection.index_information()
def test_create_composite_index(self, mongo_idx):
create_index(mongo_idx.tmp_collection, ['id', 'amount'])
assert 'id_1_amount_1' in mongo_idx.tmp_collection.index_information()
def test_create_composite_index_params(self, mongo_idx):
from pymongo import ASCENDING, DESCENDING
create_index(mongo_idx.tmp_collection,
[('id', ASCENDING), ('amount', DESCENDING)])
assert 'id_1_amount_-1' in mongo_idx.tmp_collection.index_information()
def test_fails_when_using_not_list_of_tuples_or_strings(self, mongo_idx):
from pymongo import DESCENDING
with pytest.raises(TypeError):
create_index(mongo_idx.tmp_collection, [['id', DESCENDING]])
def test_create_index_with_unique(self, mongo_idx):
coll = mongo_idx.tmp_collection
create_index(coll, 'id', unique=True)
assert coll.index_information()['id_1']['unique']
class TestCreateNamedIndex(object):
def test_create_index(self, mongo_idx):
create_index(mongo_idx.tmp_collection, 'id', name='idx_id')
assert 'idx_id' in mongo_idx.tmp_collection.index_information()
def test_create_index_single_element_list(self, mongo_idx):
create_index(mongo_idx.tmp_collection, ['id'], name='idx_id')
assert 'idx_id' in mongo_idx.tmp_collection.index_information()
def test_create_composite_index(self, mongo_idx):
create_index(mongo_idx.tmp_collection, ['id', 'amount'], name='c_idx')
assert 'c_idx' in mongo_idx.tmp_collection.index_information()
def test_create_composite_index_params(self, mongo_idx):
from pymongo import ASCENDING, DESCENDING
create_index(mongo_idx.tmp_collection,
[('id', ASCENDING), ('amount', DESCENDING)],
name='c_idx')
assert 'c_idx' in mongo_idx.tmp_collection.index_information()
def test_fails_when_using_not_list_of_tuples_or_strings(self, mongo_idx):
from pymongo import DESCENDING
with pytest.raises(TypeError):
create_index(mongo_idx.tmp_collection, [['id', DESCENDING]])
def test_create_index_with_unique(self, mongo_idx):
coll = mongo_idx.tmp_collection
create_index(coll, 'id', unique=True, name='c_idx')
assert coll.index_information()['c_idx']['unique']
@no_mongoimport
def test_csv_mongodb_load(db, file_name, empty_collec):
csv = CSV(file_name)
    # The CSV file has no header row, so column names are passed to
    # mongoimport explicitly via --fields (see copy_cmd below), e.g.:
    #   mongoimport -d test_db -c testcollection --type csv \
    #       --file /Users/quasiben/test.csv --fields alpha,beta
    # mongoimport's --ignoreBlanks flag may also be relevant for CSVs
    # containing empty fields.
coll = empty_collec
copy_info = {
'dbname':db.name,
'coll': coll.name,
'abspath': csv._abspath,
'column_names': ','.join(csv.columns)
}
copy_cmd = """mongoimport -d {dbname} -c {coll} --type csv --file {abspath} --fields {column_names}"""
copy_cmd = copy_cmd.format(**copy_info)
ps = subprocess.Popen(copy_cmd, shell=os.name != 'nt',
stdout=subprocess.PIPE)
output = ps.stdout.read()
mongo_data = list(coll.find({}, {'_0': 1, '_id': 0}))
assert list(csv[:, '_0']) == [i['_0'] for i in mongo_data]
def test_csv_into_mongodb_colon_del(empty_collec, file_name_colon):
csv = CSV(file_name_colon)
coll = empty_collec
lhs = into(list, csv)
newcoll = into(coll, csv)
rhs = into(list, newcoll)
assert lhs == rhs
def test_csv_into_mongodb(empty_collec, file_name):
csv = CSV(file_name)
coll = empty_collec
res = into(coll, csv)
mongo_data = list(res.find({}, {'_0': 1, '_id': 0}))
assert list(csv[:, '_0']) == [i['_0'] for i in mongo_data]
def test_csv_into_mongodb_columns(empty_collec, file_name):
csv = CSV(file_name, schema='{x: int, y: int}')
coll = empty_collec
lhs = into(list, csv)
assert lhs == into(list, into(coll, csv))
def test_csv_into_mongodb_complex(empty_collec):
this_dir = os.path.dirname(__file__)
file_name = os.path.join(this_dir, 'dummydata.csv')
s = "{ Name : string, RegistrationDate : ?datetime, ZipCode : ?int64, Consts : ?float64 }"
csv = CSV(file_name, schema=s)
coll = empty_collec
into(coll, csv)
mongo_data = list(coll.find({}, {'_id': 0}))
    # The exact-equality assertion below fails due to Python floating point
    # errors, so assert_allclose is used instead:
    # into(list, csv) == into(list, into(coll, csv))
assert_allclose([list(csv[0])], [[mongo_data[0][col] for col in csv.columns]])
assert_allclose([list(csv[9])], [[mongo_data[-1][col] for col in csv.columns]])
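# A note on the tolerance check above: an allclose-style comparison treats
# values as equal when roughly abs(actual - desired) <= atol + rtol *
# abs(desired), which is why the tiny representation differences introduced by
# round-tripping float64 values through MongoDB do not trip the assertion.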
les_mis_data = {"nodes":[{"name":"Myriel","group":1},
{"name":"Napoleon","group":1},
{"name":"Mlle.Baptistine","group":1},
],
"links":[{"source":1,"target":0,"value":1},
{"source":2,"target":0,"value":8},
{"source":3,"target":0,"value":10},
],
}
@no_mongoimport
def test_json_into_mongodb(empty_collec):
with filetext(json.dumps(les_mis_data)) as filename:
dd = JSON(filename)
coll = empty_collec
into(coll,dd)
mongo_data = list(coll.find())
last = mongo_data[0]['nodes'][-1]
first = mongo_data[0]['nodes'][0]
first = (first['group'], first['name'])
last = (last['group'], last['name'])
assert dd.as_py()[1][-1] == last
assert dd.as_py()[1][0] == first
data = [{u'id': u'90742205-0032-413b-b101-ce363ba268ef',
u'name': u'Jean-Luc Picard',
u'posts': [{u'content': (u"There are some words I've known "
"since..."),
u'title': u'Civil rights'}],
u'tv_show': u'Star Trek TNG'},
{u'id': u'7ca1d1c3-084f-490e-8b47-2b64b60ccad5',
u'name': u'William Adama',
u'posts': [{u'content': u'The Cylon War is long over...',
u'title': u'Decommissioning speech'},
{u'content': u'Moments ago, this ship received...',
u'title': u'We are at war'},
{u'content': u'The discoveries of the past few days...',
u'title': u'The new Earth'}],
u'tv_show': u'Battlestar Galactica'},
{u'id': u'520df804-1c91-4300-8a8d-61c2499a8b0d',
u'name': u'Laura Roslin',
u'posts': [{u'content': u'I, Laura Roslin, ...',
u'title': u'The oath of office'},
{u'content': u'The Cylons have the ability...',
u'title': u'They look like us'}],
u'tv_show': u'Battlestar Galactica'}]
@no_mongoimport
def test_jsonarray_into_mongodb(empty_collec):
filename = tempfile.mktemp(".json")
with open(filename, "w") as f:
json.dump(data, f)
dd = JSON(filename, schema="3 * { id : string, name : string, "
"posts : var * { content : string, title : string },"
" tv_show : string }")
coll = empty_collec
into(coll,dd, json_array=True)
mongo_data = list(coll.find({}, {'_id': 0}))
assert mongo_data[0] == data[0]
@no_mongoimport
def test_resource(conn):
coll = resource('mongodb://localhost:27017/db::mycoll')
assert coll.name == 'mycoll'
assert coll.database.name == 'db'
assert coll.database.connection.host == 'localhost'
assert coll.database.connection.port == 27017
| {
"repo_name": "vitan/blaze",
"path": "blaze/tests/test_mongo.py",
"copies": "1",
"size": "11099",
"license": "bsd-3-clause",
"hash": -5910002439369789000,
"line_mean": 30.6210826211,
"line_max": 109,
"alpha_frac": 0.5899630597,
"autogenerated": false,
"ratio": 3.386939273725969,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4476902333425969,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import pytest
import drms
@pytest.mark.jsoc
@pytest.mark.export
@pytest.mark.parametrize('method', ['url_quick', 'url'])
def test_export_asis_basic(jsoc_client_export, method):
r = jsoc_client_export.export(
'hmi.v_avg120[2150]{mean,power}', protocol='as-is', method=method,
requestor=False)
assert isinstance(r, drms.ExportRequest)
assert r.wait(timeout=60)
assert r.has_succeeded()
assert r.protocol == 'as-is'
assert len(r.urls) == 12 # 6 files per segment
for record in r.urls.record:
record = record.lower()
assert record.startswith('hmi.v_avg120[2150]')
assert (record.endswith('{mean}') or
record.endswith('{power}'))
for filename in r.urls.filename:
assert (filename.endswith('mean.fits') or
filename.endswith('power.fits'))
for url in r.urls.url:
assert (url.endswith('mean.fits') or
url.endswith('power.fits'))
| {
"repo_name": "kbg/drms",
"path": "drms/tests/online/test_jsoc_export.py",
"copies": "1",
"size": "1034",
"license": "mit",
"hash": 4402143745860950500,
"line_mean": 30.3333333333,
"line_max": 74,
"alpha_frac": 0.6373307544,
"autogenerated": false,
"ratio": 3.541095890410959,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4678426644810959,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import pytest
import mesos_pb2
from proxo import encode, decode, MessageProxy
from mesos import (CommandInfo, Cpus, Disk, FrameworkID,
FrameworkInfo, Mem, Offer, ResourcesMixin,
ScalarResource, TaskID, TaskInfo,
TaskStatus)
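# These tests exercise the proxo encode/decode pair: encode() turns a
# MessageProxy subclass such as TaskInfo or Cpus into the corresponding
# mesos_pb2 message, and decode() wraps a raw mesos_pb2 message back into its
# proxy class. A minimal round trip, using only names imported above:
#
#     pb = encode(Cpus(0.1))                     # -> mesos_pb2 Resource
#     proxy = decode(mesos_pb2.FrameworkInfo())  # -> FrameworkInfo proxy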
def test_encode_resources():
pb = encode(Cpus(0.1))
assert pb.scalar.value == 0.1
assert pb.name == 'cpus'
assert pb.type == mesos_pb2.Value.SCALAR
pb = encode(Mem(16))
assert pb.scalar.value == 16
assert pb.name == 'mem'
assert pb.type == mesos_pb2.Value.SCALAR
pb = encode(Disk(256))
assert pb.scalar.value == 256
assert pb.name == 'disk'
assert pb.type == mesos_pb2.Value.SCALAR
def test_encode_task_info_resources():
task = TaskInfo(name='test-task',
id=TaskID(value='test-task-id'),
resources=[Cpus(0.1), Mem(16)],
command=CommandInfo(value='testcmd'))
pb = encode(task)
assert pb.name == 'test-task'
assert pb.task_id.value == 'test-task-id'
assert pb.resources[0].name == 'cpus'
assert pb.resources[0].scalar.value == 0.1
assert pb.resources[1].name == 'mem'
assert pb.resources[1].scalar.value == 16
assert pb.command.value == 'testcmd'
def test_decode_framework_info():
message = mesos_pb2.FrameworkInfo(id=mesos_pb2.FrameworkID(value='test'))
wrapped = decode(message)
assert isinstance(wrapped, MessageProxy)
assert isinstance(wrapped, FrameworkInfo)
assert isinstance(wrapped.id, MessageProxy)
assert isinstance(wrapped.id, FrameworkID)
def test_scalar_resource_comparison():
r1 = ScalarResource(value=11.5)
assert r1 == ScalarResource(value=11.5)
assert r1 <= ScalarResource(value=11.5)
assert r1 >= ScalarResource(value=11.5)
assert r1 < ScalarResource(value=12)
assert r1 > ScalarResource(value=11)
assert r1 == 11.5
assert r1 <= 11.5
assert r1 >= 11.5
assert r1 < 12
assert r1 > 11
def test_scalar_resource_addition():
r1 = ScalarResource(value=11.5)
r2 = ScalarResource(value=2)
s = r1 + r2
assert isinstance(s, ScalarResource)
assert s == ScalarResource(13.5)
assert s == 13.5
def test_scalar_resource_sum():
r1 = ScalarResource(value=11.5)
r2 = ScalarResource(value=2)
r3 = ScalarResource(value=3)
s = sum([r1, r2, r3])
assert isinstance(s, ScalarResource)
assert s == ScalarResource(16.5)
assert s == 16.5
def test_scalar_resource_subtraction():
r1 = ScalarResource(value=11.5)
r2 = ScalarResource(value=2)
s = r1 - r2
assert isinstance(s, ScalarResource)
assert s == ScalarResource(9.5)
assert s == 9.5
def test_scalar_resource_inplace_addition():
r1 = ScalarResource(value=11.5)
r2 = ScalarResource(value=2)
r1 += r2
assert isinstance(r1, ScalarResource)
assert r1 == ScalarResource(13.5)
assert r1 == 13.5
def test_scalar_resource_inplace_subtraction():
r1 = ScalarResource(value=11.5)
r2 = ScalarResource(value=2)
r1 -= r2
assert isinstance(r1, ScalarResource)
assert r1 == ScalarResource(9.5)
assert r1 == 9.5
def test_scalar_resource_multiplication():
r1 = ScalarResource(value=11.5)
r2 = ScalarResource(value=2)
m = r1 * r2
assert isinstance(m, ScalarResource)
assert m == ScalarResource(23)
assert m == 23
def test_scalar_resource_division():
r1 = ScalarResource(value=11.5)
r2 = ScalarResource(value=2)
d = r1 / r2
assert isinstance(d, ScalarResource)
assert d == ScalarResource(5.75)
assert d == 5.75
def test_resources_mixin_comparison():
o1 = Offer(resources=[Cpus(1), Mem(128), Disk(0)])
o2 = Offer(resources=[Cpus(2), Mem(256), Disk(1024)])
t1 = TaskInfo(resources=[Cpus(0.5), Mem(128), Disk(0)])
t2 = TaskInfo(resources=[Cpus(1), Mem(256), Disk(512)])
t3 = TaskInfo(resources=[Cpus(0.5), Mem(256), Disk(512)])
assert o1.cpus == 1
assert o1.mem == 128
assert o2.cpus == 2
assert o2.disk == 1024
assert t1.cpus == 0.5
assert t1.mem == 128
assert t2.cpus == 1
assert t2.disk == 512
assert o1 == o1
assert o1 < o2
assert o1 <= o2
assert o2 > o1
assert o2 >= o1
assert t1 == t1
assert t1 < t2
assert t1 <= t2
assert t2 > t1
assert t2 >= t1
assert o1 >= t1
assert o2 >= t1
assert o2 >= t2
assert t2 >= o1
assert t3 > o1
assert t3 <= t2
assert t3 > t1
def test_resources_mixin_addition():
o = Offer(resources=[Cpus(1), Mem(128), Disk(0)])
t = TaskInfo(resources=[Cpus(0.5), Mem(128), Disk(0)])
s = o + t
assert isinstance(s, ResourcesMixin)
assert s.cpus == Cpus(1.5)
assert s.cpus == 1.5
assert s.mem == Mem(256)
assert s.mem == 256
assert s.disk == Disk(0)
assert s.disk == 0
def test_resources_mixin_sum():
o1 = Offer(resources=[Cpus(1), Mem(128), Disk(0)])
o2 = Offer(resources=[Cpus(2), Mem(128), Disk(100)])
o3 = Offer(resources=[Cpus(0.5), Mem(256), Disk(200)])
s = sum([o1, o2, o3])
assert isinstance(s, ResourcesMixin)
assert s.cpus == Cpus(3.5)
assert s.cpus == 3.5
assert s.mem == Mem(512)
assert s.mem == 512
assert s.disk == Disk(300)
assert s.disk == 300
def test_resources_mixin_subtraction():
o = Offer(resources=[Cpus(1), Mem(128), Disk(0)])
t = TaskInfo(resources=[Cpus(0.5), Mem(128), Disk(0)])
s = o - t
assert isinstance(s, ResourcesMixin)
assert s.cpus == Cpus(0.5)
assert s.cpus == 0.5
assert s.mem == Mem(0)
assert s.mem == 0
assert s.disk == Disk(0)
assert s.disk == 0
def test_resources_mixin_inplace_addition():
o = Offer(resources=[Cpus(1), Mem(128), Disk(64)])
t = TaskInfo(resources=[Cpus(0.5), Mem(128), Disk(0)])
o += t
assert isinstance(o, Offer)
assert o.cpus == Cpus(1.5)
assert o.cpus == 1.5
assert o.mem == Mem(256)
assert o.mem == 256
assert o.disk == Disk(64)
assert o.disk == 64
def test_resources_mixin_inplace_subtraction():
o = Offer(resources=[Cpus(1), Mem(128), Disk(64)])
t = TaskInfo(resources=[Cpus(0.5), Mem(128), Disk(0)])
o -= t
assert isinstance(o, Offer)
assert o.cpus == Cpus(0.5)
assert o.cpus == 0.5
assert o.mem == Mem(0)
assert o.mem == 0
assert o.disk == Disk(64)
assert o.disk == 64
def test_status_in_task_info():
t = TaskInfo(name='test-task',
id=TaskID(value='test-task-id'),
resources=[Cpus(0.1), Mem(16)],
command=CommandInfo(value='echo 100'))
assert isinstance(t.status, TaskStatus)
assert t.status.state == 'TASK_STAGING'
p = encode(t)
assert isinstance(p, mesos_pb2.TaskInfo)
with pytest.raises(AttributeError):
p.status
def test_encode_task_info():
t = TaskInfo(name='test-task',
id=TaskID(value='test-task-id'),
resources=[Cpus(0.1), Mem(16)],
command=CommandInfo(value='echo 100'))
p = encode(t)
assert isinstance(p, mesos_pb2.TaskInfo)
assert p.command.value == 'echo 100'
assert p.name == 'test-task'
assert p.resources[0].name == 'cpus'
assert p.resources[0].scalar.value == 0.1
assert p.task_id.value == 'test-task-id'
def test_non_strict_encode_task_info():
t = TaskInfo(name='test-task',
id=TaskID(value='test-task-id'),
resources=[Cpus(0.1), Mem(16)],
command=CommandInfo(value='echo 100'))
t.result = 'some binary data'
t.status = TaskStatus()
p = encode(t)
assert isinstance(p, mesos_pb2.TaskInfo)
assert p.command.value == 'echo 100'
with pytest.raises(AttributeError):
p.status
| {
"repo_name": "kszucs/proxo",
"path": "proxo/tests/test_mesos.py",
"copies": "1",
"size": "7866",
"license": "apache-2.0",
"hash": -3259916911450432000,
"line_mean": 25.6644067797,
"line_max": 77,
"alpha_frac": 0.6117467582,
"autogenerated": false,
"ratio": 3.1565008025682184,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9268247560768219,
"avg_score": 0,
"num_lines": 295
} |
from __future__ import absolute_import, division, print_function
import pytest
import numpy as np
from mock import MagicMock
from glue.external.six.moves import range as xrange
from glue import core
from .. import command as c
from .. import roi
from ..data_factories import tabular_data
from .util import simple_session, simple_catalog
class TestCommandStack(object):
def setup_method(self, method):
self.session = simple_session()
self.stack = self.session.command_stack
def make_command(self):
return MagicMock(c.Command)
def make_data(self):
with simple_catalog() as path:
cmd = c.LoadData(path=path, factory=tabular_data)
data = self.stack.do(cmd)
return data
def test_do(self):
c1 = self.make_command()
self.stack.do(c1)
c1.do.assert_called_once_with(self.session)
def test_undo(self):
c1, c2 = self.make_command(), self.make_command()
self.stack.do(c1)
self.stack.do(c2)
self.stack.undo()
c2.undo.assert_called_once_with(self.session)
self.stack.undo()
c1.undo.assert_called_once_with(self.session)
def test_redo(self):
c1, c2 = self.make_command(), self.make_command()
self.stack.do(c1)
self.stack.do(c2)
self.stack.undo()
self.stack.redo()
c2.undo.assert_called_once_with(self.session)
assert c2.do.call_count == 2
assert c2.undo.call_count == 1
assert c1.do.call_count == 1
assert c1.undo.call_count == 0
def test_max_undo(self):
cmds = [self.make_command() for _ in xrange(c.MAX_UNDO + 1)]
for cmd in cmds:
self.stack.do(cmd)
for cmd in cmds[:-1]:
self.stack.undo()
with pytest.raises(IndexError):
self.stack.undo()
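    # In other words the stack retains at most MAX_UNDO commands: after
    # issuing MAX_UNDO + 1 of them, MAX_UNDO successful undos exhaust the
    # history and the next undo raises IndexError.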
def test_invalid_redo(self):
with pytest.raises(IndexError) as exc:
self.stack.redo()
assert exc.value.args[0] == 'No commands to redo'
def test_load_data(self):
data = self.make_data()
np.testing.assert_array_equal(data['a'], [1, 3])
def test_add_data(self):
data = self.make_data()
cmd = c.AddData(data=data)
self.stack.do(cmd)
assert len(self.session.data_collection) == 1
self.stack.undo()
assert len(self.session.data_collection) == 0
def test_remove_data(self):
data = self.make_data()
add = c.AddData(data=data)
remove = c.RemoveData(data=data)
self.stack.do(add)
assert len(self.session.data_collection) == 1
self.stack.do(remove)
assert len(self.session.data_collection) == 0
self.stack.undo()
assert len(self.session.data_collection) == 1
def test_new_data_viewer(self):
cmd = c.NewDataViewer(viewer=None, data=None)
v = self.stack.do(cmd)
self.session.application.new_data_viewer.assert_called_once_with(
None, None)
self.stack.undo()
v.close.assert_called_once_with(warn=False)
def test_apply_roi(self):
x = core.Data(x=[1, 2, 3])
s = x.new_subset()
dc = self.session.data_collection
dc.append(x)
r = MagicMock(roi.Roi)
client = MagicMock(core.client.Client)
client.data = dc
cmd = c.ApplyROI(data_collection=dc, roi=r,
apply_func=client.apply_roi)
self.stack.do(cmd)
client.apply_roi.assert_called_once_with(r)
old_state = s.subset_state
s.subset_state = MagicMock(spec_set=core.subset.SubsetState)
self.stack.undo()
assert s.subset_state is old_state
| {
"repo_name": "stscieisenhamer/glue",
"path": "glue/core/tests/test_command.py",
"copies": "2",
"size": "3732",
"license": "bsd-3-clause",
"hash": -4717385627639079000,
"line_mean": 25.2816901408,
"line_max": 73,
"alpha_frac": 0.597266881,
"autogenerated": false,
"ratio": 3.445983379501385,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5043250260501385,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import pytest
import numpy as np
from glue.external.six import string_types, PY2
from ..array import (view_shape, coerce_numeric, stack_view, unique,
shape_to_string, check_sorted, pretty_number)
@pytest.mark.parametrize(('before', 'ref_after', 'ref_indices'),
(([2.2, 5, 4, 4, 2, 8.3, 2.2], [2, 2.2, 4, 5, 8.3], [1, 3, 2, 2, 0, 4, 1]),
([2.2, 5, np.nan, 2, 8.3, 2.2], [2, 2.2, 5, 8.3], [1, 2, -1, 0, 3, 1])))
def test_unique(before, ref_after, ref_indices):
after, indices = unique(before)
np.testing.assert_array_equal(after, ref_after)
np.testing.assert_array_equal(indices, ref_indices)
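# Note the second parametrized case above: NaN is dropped from the unique
# values and its position in the index array is flagged with -1 rather than a
# valid index.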
def test_shape_to_string():
assert shape_to_string((1, 4, 3)) == "(1, 4, 3)"
@pytest.mark.skipif("not PY2")
def test_shape_to_string_long():
# Shape includes long ints on Windows
assert shape_to_string((long(1), long(4), long(3))) == "(1, 4, 3)"
def test_view_shape():
assert view_shape((10, 10), np.s_[:]) == (10, 10)
assert view_shape((10, 10, 10), np.s_[:]) == (10, 10, 10)
assert view_shape((10, 10), np.s_[:, 1]) == (10,)
assert view_shape((10, 10), np.s_[2:3, 2:3]) == (1, 1)
assert view_shape((10, 10), None) == (10, 10)
assert view_shape((10, 10), ([1, 2, 3], [2, 3, 4])) == (3,)
def test_coerce_numeric():
x = np.array(['1', '2', '3.14', '4'], dtype=str)
np.testing.assert_array_equal(coerce_numeric(x),
[1, 2, 3.14, 4])
x = np.array([1, 2, 3])
assert coerce_numeric(x) is x
x = np.array([0, 1, 1, 0], dtype=bool)
np.testing.assert_array_equal(coerce_numeric(x), np.array([0, 1, 1, 0], dtype=np.int))
@pytest.mark.parametrize(('shape', 'views'),
[
[(5, 5), (np.s_[0:3],)],
[(5, 4), (np.s_[0:3],)],
[(5, 4), ((3, 2),)],
[(5, 4), (np.s_[0:4], np.s_[:, 0:2])],
[(5, 4), (np.s_[0:3, 0:2], 'transpose', (0, 0))],
[(10, 20), (np.random.random((10, 20)) > 0.1, 3)],
[(5, 7), ('transpose', (3, 2))],
])
def test_stack_view(shape, views):
x = np.random.random(shape)
exp = x
for v in views:
if isinstance(v, string_types) and v == 'transpose':
exp = exp.T
else:
exp = exp[v]
actual = x[stack_view(shape, *views)]
np.testing.assert_array_equal(exp, actual)
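# stack_view composes a whole sequence of views (slices, fancy indices,
# boolean masks, or the literal string 'transpose') into a single index, so
# x[stack_view(shape, v1, v2, ...)] is expected to match applying v1, v2, ...
# to x one after another, which is exactly what the loop above computes as the
# expected result.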
@pytest.mark.parametrize(('array', 'is_sorted'),
(([1, 3, 4, 3], False), ([1, 2, np.nan, 3], True), ([1, 3, 4, 4.1], True)))
def test_check_sorted(array, is_sorted):
assert check_sorted(array) is is_sorted
class TestPrettyNumber(object):
def test_single(self):
assert pretty_number([1]) == ['1']
assert pretty_number([0]) == ['0']
assert pretty_number([-1]) == ['-1']
assert pretty_number([1.0001]) == ['1']
assert pretty_number([1.01]) == ['1.01']
assert pretty_number([1e-5]) == ['1.000e-05']
assert pretty_number([1e5]) == ['1.000e+05']
assert pretty_number([3.3]) == ['3.3']
assert pretty_number([1.]) == ['1']
assert pretty_number([1.200]) == ['1.2']
def test_large(self):
        # Regression test for a bug that caused trailing zeros in exponent to
# be removed.
assert pretty_number([1e9]) == ['1.000e+09']
assert pretty_number([2e10]) == ['2.000e+10']
assert pretty_number([3e11]) == ['3.000e+11']
def test_list(self):
assert pretty_number([1, 2, 3.3, 1e5]) == ['1', '2', '3.3',
'1.000e+05']
| {
"repo_name": "saimn/glue",
"path": "glue/utils/tests/test_array.py",
"copies": "1",
"size": "3817",
"license": "bsd-3-clause",
"hash": -7857257402498934000,
"line_mean": 35.0094339623,
"line_max": 100,
"alpha_frac": 0.4954152476,
"autogenerated": false,
"ratio": 3.015007898894155,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.900849427850887,
"avg_score": 0.0003857735970569896,
"num_lines": 106
} |
from __future__ import absolute_import, division, print_function
import pytest
import numpy as np
from qtpy import QtCore, QtGui
from glue.utils.qt import get_qapp
from qtpy.QtCore import Qt
from glue.core import Data, DataCollection, Session
from glue.utils.qt import qt4_to_mpl_color
from glue.app.qt import GlueApplication
from ..viewer_widget import DataTableModel, TableWidget
from glue.core.edit_subset_mode import (EditSubsetMode, AndNotMode, OrMode,
ReplaceMode)
class TestDataTableModel():
def setup_method(self, method):
self.data = Data(x=[1, 2, 3, 4], y=[2, 3, 4, 5])
self.model = DataTableModel(self.data)
def test_column_count(self):
assert self.model.columnCount() == 2
def test_column_count_hidden(self):
self.model.show_hidden = True
assert self.model.columnCount() == 4
def test_header_data(self):
for i, c in enumerate(self.data.visible_components):
result = self.model.headerData(i, Qt.Horizontal, Qt.DisplayRole)
assert result == c.label
for i in range(self.data.size):
result = self.model.headerData(i, Qt.Vertical, Qt.DisplayRole)
assert result == str(i)
def test_row_count(self):
assert self.model.rowCount() == 4
def test_data(self):
for i, c in enumerate(self.data.visible_components):
for j in range(self.data.size):
idx = self.model.index(j, i)
result = self.model.data(idx, Qt.DisplayRole)
assert float(result) == self.data[c, j]
@pytest.mark.xfail
def test_data_2d(self):
self.data = Data(x=[[1, 2], [3, 4]], y=[[2, 3], [4, 5]])
self.model = DataTableModel(self.data)
for i, c in enumerate(self.data.visible_components):
for j in range(self.data.size):
idx = self.model.index(j, i)
result = self.model.data(idx, Qt.DisplayRole)
assert float(result) == self.data[c].ravel()[j]
def check_values_and_color(model, data, colors):
for i in range(len(colors)):
for j, colname in enumerate('abc'):
# Get index of cell
idx = model.index(i, j)
# Check values
value = model.data(idx, Qt.DisplayRole)
assert value == str(data[colname][i])
# Check colors
brush = model.data(idx, Qt.BackgroundRole)
if colors[i] is None:
assert brush is None
else:
assert qt4_to_mpl_color(brush.color()) == colors[i]
def test_table_widget(tmpdir):
# Start off by creating a glue application instance with a table viewer and
# some data pre-loaded.
app = get_qapp()
d = Data(a=[1, 2, 3, 4, 5],
b=[3.2, 1.2, 4.5, 3.3, 2.2],
c=['e', 'b', 'c', 'a', 'f'])
dc = DataCollection([d])
gapp = GlueApplication(dc)
widget = gapp.new_data_viewer(TableWidget)
widget.add_data(d)
subset_mode = EditSubsetMode()
# Create two subsets
sg1 = dc.new_subset_group('D <= 3', d.id['a'] <= 3)
sg1.style.color = '#aa0000'
sg2 = dc.new_subset_group('1 < D < 4', (d.id['a'] > 1) & (d.id['a'] < 4))
sg2.style.color = '#0000cc'
model = widget.ui.table.model()
# We now check what the data and colors of the table are, and try various
# sorting methods to make sure that things are still correct.
data = {
'a': [1, 2, 3, 4, 5],
'b': [3.2, 1.2, 4.5, 3.3, 2.2],
'c': ['e', 'b', 'c', 'a', 'f']
}
colors = ['#aa0000', '#380088', '#380088', None, None]
check_values_and_color(model, data, colors)
model.sort(1, Qt.AscendingOrder)
data = {
'a': [2, 5, 1, 4, 3],
'b': [1.2, 2.2, 3.2, 3.3, 4.5],
'c': ['b', 'f', 'e', 'a', 'c']
}
colors = ['#380088', None, '#aa0000', None, '#380088']
check_values_and_color(model, data, colors)
model.sort(2, Qt.AscendingOrder)
data = {
'a': [4, 2, 3, 1, 5],
'b': [3.3, 1.2, 4.5, 3.2, 2.2],
'c': ['a', 'b', 'c', 'e', 'f']
}
colors = [None, '#380088', '#380088', '#aa0000', None]
check_values_and_color(model, data, colors)
model.sort(0, Qt.DescendingOrder)
data = {
'a': [5, 4, 3, 2, 1],
'b': [2.2, 3.3, 4.5, 1.2, 3.2],
'c': ['f', 'a', 'c', 'b', 'e']
}
colors = [None, None, '#380088', '#380088', '#aa0000']
check_values_and_color(model, data, colors)
model.sort(0, Qt.AscendingOrder)
# We now modify the subsets using the table.
selection = widget.ui.table.selectionModel()
widget.toolbar.actions['table:rowselect'].toggle()
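    # With the row-selection tool enabled, pressing Enter below applies the
    # currently selected rows to the active edit subset.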
def press_key(key):
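        # Post a synthetic key press to the table view and flush the Qt event
        # loop so the press is delivered before the test continues.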
event = QtGui.QKeyEvent(QtCore.QEvent.KeyPress, key, Qt.NoModifier)
app.postEvent(widget.ui.table, event)
app.processEvents()
app.processEvents()
# We now use key presses to navigate down to the third row
press_key(Qt.Key_Tab)
press_key(Qt.Key_Down)
press_key(Qt.Key_Down)
indices = selection.selectedRows()
# We make sure that the third row is selected
assert len(indices) == 1
assert indices[0].row() == 2
# At this point, the subsets haven't changed yet
np.testing.assert_equal(d.subsets[0].to_mask(), [1, 1, 1, 0, 0])
np.testing.assert_equal(d.subsets[1].to_mask(), [0, 1, 1, 0, 0])
# We specify that we are editing the second subset, and use a 'not' logical
# operation to remove the currently selected line from the second subset.
d.edit_subset = [d.subsets[1]]
subset_mode.mode = AndNotMode
press_key(Qt.Key_Enter)
np.testing.assert_equal(d.subsets[0].to_mask(), [1, 1, 1, 0, 0])
np.testing.assert_equal(d.subsets[1].to_mask(), [0, 1, 0, 0, 0])
# At this point, the selection should be cleared
indices = selection.selectedRows()
assert len(indices) == 0
# We move to the fourth row and now do an 'or' selection with the first
# subset.
press_key(Qt.Key_Down)
subset_mode.mode = OrMode
d.edit_subset = [d.subsets[0]]
press_key(Qt.Key_Enter)
np.testing.assert_equal(d.subsets[0].to_mask(), [1, 1, 1, 1, 0])
np.testing.assert_equal(d.subsets[1].to_mask(), [0, 1, 0, 0, 0])
# Finally we move to the fifth row and deselect all subsets so that
# pressing enter now creates a new subset.
press_key(Qt.Key_Down)
subset_mode.mode = ReplaceMode
d.edit_subset = None
press_key(Qt.Key_Enter)
np.testing.assert_equal(d.subsets[0].to_mask(), [1, 1, 1, 1, 0])
np.testing.assert_equal(d.subsets[1].to_mask(), [0, 1, 0, 0, 0])
np.testing.assert_equal(d.subsets[2].to_mask(), [0, 0, 0, 0, 1])
# Make the color for the new subset deterministic
dc.subset_groups[2].style.color = '#bababa'
# Now finally check saving and restoring session
session_file = tmpdir.join('table.glu').strpath
gapp.save_session(session_file)
gapp2 = GlueApplication.restore_session(session_file)
gapp2.show()
d = gapp2.data_collection[0]
widget2 = gapp2.viewers[0][0]
model2 = widget2.ui.table.model()
data = {
'a': [1, 2, 3, 4, 5],
'b': [3.2, 1.2, 4.5, 3.3, 2.2],
'c': ['e', 'b', 'c', 'a', 'f']
}
# Need to take into account new selections above
colors = ['#aa0000', '#380088', '#aa0000', "#aa0000", "#bababa"]
check_values_and_color(model2, data, colors)
| {
"repo_name": "saimn/glue",
"path": "glue/viewers/table/qt/tests/test_viewer_widget.py",
"copies": "2",
"size": "7490",
"license": "bsd-3-clause",
"hash": -863099919306693200,
"line_mean": 27.0524344569,
"line_max": 79,
"alpha_frac": 0.5803738318,
"autogenerated": false,
"ratio": 3.122134222592747,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4702508054392747,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division, print_function)
import pytest
import numpy as np
import mocsy
@pytest.fixture
def scalar_variables():
"""
Functions return 1-dimensional numpy arrays.
Scalar inputs return length-1 arrays.
    Data inputs: DIC and Alk in mol/kg, in-situ temperature, and pressure.
"""
return mocsy.mvars(temp=18,
sal=35,
alk=2300.e-6,
dic=2000.e-6,
sil=0,
phos=0,
patm=1,
depth=100,
lat=0,
optcon='mol/kg',
optt='Tinsitu',
optp='db',
optb='u74',
optk1k2='l',
optkf='dg',
optgas='Pinsitu')
def test_return_12():
ret = scalar_variables()
assert len(ret) == 12
def test_return_scalar():
ret = scalar_variables()
for var in ret:
assert len(var) == 1
def test_return_real():
ret = scalar_variables()
for var in ret:
assert np.isreal(var)
def test_known_values():
ret = scalar_variables()
pH = 8.14892578
pco2 = 312.28662109
fco2 = 300.68057251
co2 = 1.01729711e-05
hco3 = 0.00177952
co3 = 0.00021031
OmegaA = 3.19940853
OmegaC = 4.94189167
BetaD = 9.68977737
DENis = 1025.71105957
p = 100.0
Tis = 18.0
known = pH, pco2, fco2, co2, hco3, co3, OmegaA, OmegaC, BetaD, DENis, p, Tis
np.testing.assert_allclose(known, np.array(ret).ravel(), rtol=1e-05)
| {
"repo_name": "jamesorr/mocsy",
"path": "test/test_mocsy.py",
"copies": "1",
"size": "1647",
"license": "mit",
"hash": -700318831731802000,
"line_mean": 24.734375,
"line_max": 80,
"alpha_frac": 0.5069823922,
"autogenerated": false,
"ratio": 3.347560975609756,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4354543367809756,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import pytest
import os
import json
from datetime import datetime, timedelta
import pandas as pd
from pytz import utc
from blaze.server.serialization import (json_dumps, json_dumps_trusted,
object_hook, object_hook_trusted)
@pytest.mark.parametrize('serializers', [(json_dumps, object_hook),
(json_dumps_trusted, object_hook_trusted)])
@pytest.mark.parametrize('input_,serialized', (
([1, datetime(2000, 1, 1, 12, 30, 0, 0, utc)],
'[1, {"__!datetime": "2000-01-01T12:30:00+00:00"}]'),
([1, datetime(2000, 1, 1, 12, 30, 0, 0)],
'[1, {"__!datetime": "2000-01-01T12:30:00"}]'),
([1, pd.NaT], '[1, {"__!datetime": "NaT"}]'),
([1, frozenset([1, 2, 3])], '[1, {"__!frozenset": [1, 2, 3]}]'),
([1, timedelta(seconds=5)], '[1, {"__!timedelta": 5.0}]'),
))
def test_json_encoder(serializers, input_, serialized):
serializer, deserializer = serializers
result = json.dumps(input_, default=serializer)
assert result == serialized
assert json.loads(result, object_hook=deserializer) == input_
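# The sketch below is editorial and hypothetical (not part of the original
# module): it reuses the same json_dumps/object_hook pair on an ad-hoc payload,
# mirroring the round trip asserted above; the payload values are made up.
def _roundtrip_sketch():
    payload = [1, datetime(2000, 1, 1, 12, 30, 0, 0, utc)]
    encoded = json.dumps(payload, default=json_dumps)
    return json.loads(encoded, object_hook=object_hook) == payload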
| {
"repo_name": "ContinuumIO/blaze",
"path": "blaze/server/tests/test_serialization.py",
"copies": "3",
"size": "1179",
"license": "bsd-3-clause",
"hash": -5075330953334770000,
"line_mean": 38.3,
"line_max": 85,
"alpha_frac": 0.6022052587,
"autogenerated": false,
"ratio": 3.43731778425656,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.553952304295656,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import pytest
import os
import numpy as np
import sqlalchemy as sa
from datashape import discover, dshape
import datashape
from into.backends.sql import (dshape_to_table, create_from_datashape,
dshape_to_alchemy)
from into.utils import tmpfile, raises
from into import convert, append, resource, discover, into
def test_resource():
sql = resource('sqlite:///:memory:::mytable',
dshape='var * {x: int, y: int}')
assert isinstance(sql, sa.Table)
assert sql.name == 'mytable'
assert isinstance(sql.bind, sa.engine.base.Engine)
assert set([c.name for c in sql.c]) == set(['x', 'y'])
def test_append_and_convert_round_trip():
engine = sa.create_engine('sqlite:///:memory:')
metadata = sa.MetaData(engine)
t = sa.Table('bank', metadata,
sa.Column('name', sa.String, primary_key=True),
sa.Column('balance', sa.Integer))
t.create()
data = [('Alice', 1), ('Bob', 2)]
append(t, data)
assert convert(list, t) == data
def test_plus_must_have_text():
with pytest.raises(NotImplementedError):
resource('redshift+://user:pass@host:1234/db')
def test_resource_on_file():
with tmpfile('.db') as fn:
uri = 'sqlite:///' + fn
sql = resource(uri, 'foo', dshape='var * {x: int, y: int}')
assert isinstance(sql, sa.Table)
with tmpfile('.db') as fn:
uri = 'sqlite:///' + fn
sql = resource(uri + '::' + 'foo', dshape='var * {x: int, y: int}')
assert isinstance(sql, sa.Table)
def test_resource_to_engine():
with tmpfile('.db') as fn:
uri = 'sqlite:///' + fn
r = resource(uri)
assert isinstance(r, sa.engine.Engine)
assert r.dialect.name == 'sqlite'
def test_resource_to_engine_to_create_tables():
with tmpfile('.db') as fn:
uri = 'sqlite:///' + fn
ds = datashape.dshape('{mytable: var * {name: string, amt: int}}')
r = resource(uri, dshape=ds)
assert isinstance(r, sa.engine.Engine)
assert r.dialect.name == 'sqlite'
assert discover(r) == ds
def test_discovery():
assert discover(sa.String()) == datashape.string
metadata = sa.MetaData()
s = sa.Table('accounts', metadata,
sa.Column('name', sa.String),
sa.Column('amount', sa.Integer),
sa.Column('timestamp', sa.DateTime, primary_key=True))
assert discover(s) == \
dshape('var * {name: ?string, amount: ?int32, timestamp: datetime}')
def test_discovery_numeric_column():
assert discover(sa.String()) == datashape.string
metadata = sa.MetaData()
s = sa.Table('name', metadata,
sa.Column('name', sa.types.NUMERIC),)
assert discover(s)
def test_discover_null_columns():
assert dshape(discover(sa.Column('name', sa.String, nullable=True))) == \
dshape('{name: ?string}')
assert dshape(discover(sa.Column('name', sa.String, nullable=False))) == \
dshape('{name: string}')
def single_table_engine():
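    # Helper: build an in-memory SQLite engine holding a single empty
    # 'accounts' table and return both the engine and the table.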
engine = sa.create_engine('sqlite:///:memory:')
metadata = sa.MetaData(engine)
t = sa.Table('accounts', metadata,
sa.Column('name', sa.String),
sa.Column('amount', sa.Integer))
t.create()
return engine, t
def test_select_to_iterator():
engine, t = single_table_engine()
append(t, [('Alice', 100), ('Bob', 200)])
sel = sa.select([t.c.amount + 1])
assert convert(list, sel) == [(101,), (201,)]
assert convert(list, sel, dshape=dshape('var * int')) == [101, 201]
sel2 = sa.select([sa.sql.func.sum(t.c.amount)])
assert convert(int, sel2, dshape=dshape('int')) == 300
sel3 = sa.select([t])
result = convert(list, sel3, dshape=discover(t))
assert type(result[0]) is tuple
def test_discovery_engine():
engine, t = single_table_engine()
assert discover(engine, 'accounts') == discover(t)
assert str(discover(engine)) == str(discover({'accounts': t}))
def test_discovery_metadata():
engine, t = single_table_engine()
metadata = t.metadata
assert str(discover(metadata)) == str(discover({'accounts': t}))
def test_discover_views():
engine, t = single_table_engine()
metadata = t.metadata
with engine.connect() as conn:
conn.execute('''CREATE VIEW myview AS
SELECT name, amount
FROM accounts
WHERE amount > 0''')
assert str(discover(metadata)) == str(discover({'accounts': t, 'myview': t}))
def test_extend_empty():
engine, t = single_table_engine()
assert not convert(list, t)
append(t, [])
assert not convert(list, t)
def test_dshape_to_alchemy():
assert dshape_to_alchemy('string') == sa.Text
assert isinstance(dshape_to_alchemy('string[40]'), sa.String)
assert not isinstance(dshape_to_alchemy('string["ascii"]'), sa.Unicode)
assert isinstance(dshape_to_alchemy('string[40, "U8"]'), sa.Unicode)
assert dshape_to_alchemy('string[40]').length == 40
assert dshape_to_alchemy('float32').precision == 24
assert dshape_to_alchemy('float64').precision == 53
def test_dshape_to_table():
t = dshape_to_table('bank', '{name: string, amount: int}')
assert isinstance(t, sa.Table)
assert t.name == 'bank'
assert [c.name for c in t.c] == ['name', 'amount']
def test_create_from_datashape():
engine = sa.create_engine('sqlite:///:memory:')
ds = dshape('''{bank: var * {name: string, amount: int},
points: var * {x: int, y: int}}''')
engine = create_from_datashape(engine, ds)
assert discover(engine) == ds
def test_into_table_iterator():
engine = sa.create_engine('sqlite:///:memory:')
metadata = sa.MetaData(engine)
t = dshape_to_table('points', '{x: int, y: int}', metadata=metadata)
t.create()
data = [(1, 1), (2, 4), (3, 9)]
append(t, data)
assert convert(list, t) == data
t2 = dshape_to_table('points2', '{x: int, y: int}', metadata=metadata)
t2.create()
data2 = [{'x': 1, 'y': 1}, {'x': 2, 'y': 4}, {'x': 3, 'y': 9}]
append(t2, data2)
assert convert(list, t2) == data
def test_sql_field_names_disagree_on_order():
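    # append() matches columns by field name rather than by position, so rows
    # supplied with dshape '{y: int, x: int}' come back with x and y swapped
    # relative to the input tuples.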
r = resource('sqlite:///:memory:::tb', dshape=dshape('{x: int, y: int}'))
append(r, [(1, 2), (10, 20)], dshape=dshape('{y: int, x: int}'))
assert convert(set, r) == set([(2, 1), (20, 10)])
def test_sql_field_names_disagree_on_names():
r = resource('sqlite:///:memory:::tb', dshape=dshape('{x: int, y: int}'))
assert raises(Exception, lambda: append(r, [(1, 2), (10, 20)],
dshape=dshape('{x: int, z: int}')))
def test_resource_on_dialects():
assert (resource.dispatch('mysql://foo') is
resource.dispatch('mysql+pymysql://foo'))
assert (resource.dispatch('never-before-seen-sql://foo') is
resource.dispatch('mysql://foo'))
@pytest.yield_fixture
def sqlite_file():
try:
yield 'sqlite:///db.db'
finally:
os.remove('db.db')
def test_append_from_select(sqlite_file):
# we can't test in memory here because that creates two independent
# databases
raw = np.array([(200.0, 'Glenn'),
(314.14, 'Hope'),
(235.43, 'Bob')], dtype=[('amount', 'float64'),
('name', 'S5')])
raw2 = np.array([(800.0, 'Joe'),
(914.14, 'Alice'),
(1235.43, 'Ratso')], dtype=[('amount', 'float64'),
('name', 'S5')])
t = into('%s::t' % sqlite_file, raw)
s = into('%s::s' % sqlite_file, raw2)
t = append(t, s.select())
result = into(list, t)
expected = np.concatenate((raw, raw2)).tolist()
assert result == expected
def test_engine_metadata_caching():
with tmpfile('db') as fn:
engine = resource('sqlite:///' + fn)
a = resource('sqlite:///' + fn + '::a', dshape=dshape('var * {x: int}'))
b = resource('sqlite:///' + fn + '::b', dshape=dshape('var * {y: int}'))
assert a.metadata is b.metadata
assert engine is a.bind is b.bind
| {
"repo_name": "mrocklin/into",
"path": "into/backends/tests/test_sql.py",
"copies": "1",
"size": "8312",
"license": "bsd-3-clause",
"hash": -8104724058429592000,
"line_mean": 30.7251908397,
"line_max": 81,
"alpha_frac": 0.5754331088,
"autogenerated": false,
"ratio": 3.444674678823042,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4520107787623042,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import pytest
import pandas as pd
from operator import (add, sub, mul, floordiv, mod, pow, truediv, eq, ne, lt,
gt, le, ge, getitem)
from functools import partial
from datetime import datetime
import datashape
from datashape.predicates import iscollection, isscalar
from blaze import CSV, Table
from blaze.expr import (TableSymbol, projection, Field, selection, Broadcast,
join, cos, by, union, exp, distinct, Apply,
broadcast, eval_str, merge, common_subexpression, sum,
Label, ReLabel, Head, Sort, any, summary,
                        Summary, count, Symbol, discover,
max, min
)
from blaze.expr.broadcast import _expr_child
from blaze.compatibility import PY3, builtins
from blaze.utils import raises, tmpfile
from datashape import dshape, var, int32, int64, Record, DataShape
from toolz import identity, first
import numpy as np
def test_dshape():
t = TableSymbol('t', '{name: string, amount: int}')
assert t.dshape == dshape('var * {name: string, amount: int}')
def test_length():
t = TableSymbol('t', '10 * {name: string, amount: int}')
s = TableSymbol('s', '{name:string, amount:int}')
assert t.dshape == dshape('10 * {name: string, amount: int}')
assert len(t) == 10
assert len(t.name) == 10
assert len(t[['name']]) == 10
assert len(t.sort('name')) == 10
assert len(t.head(5)) == 5
assert len(t.head(50)) == 10
with pytest.raises(ValueError):
len(s)
def test_tablesymbol_eq():
assert not (TableSymbol('t', '{name: string}')
== TableSymbol('v', '{name: string}'))
def test_table_name():
t = TableSymbol('t', '10 * {people: string, amount: int}')
r = TableSymbol('r', 'int64')
with pytest.raises(AttributeError):
t.name
with pytest.raises(AttributeError):
r.name
def test_shape():
t = TableSymbol('t', '{name: string, amount: int}')
assert t.shape
assert isinstance(t.shape, tuple)
assert len(t.shape) == 1
def test_table_symbol_bool():
t = TableSymbol('t', '10 * {name: string, amount: int}')
assert t.__bool__() == True
def test_nonzero():
t = TableSymbol('t', '10 * {name: string, amount: int}')
assert t
assert (not not t) is True
def test_eq():
assert TableSymbol('t', '{a: string, b: int}').isidentical(
TableSymbol('t', '{a: string, b: int}'))
assert not TableSymbol('t', '{b: string, a: int}').isidentical(
TableSymbol('t', '{a: string, b: int}'))
def test_arithmetic():
t = TableSymbol('t', '{x: int, y: int, z: int}')
x, y, z = t['x'], t['y'], t['z']
exprs = [x + 1, x + y, 1 + y,
x - y, 1 - x, x - 1,
x ** y, x ** 2, 2 ** x,
x * y, x ** 2, 2 ** x,
x / y, x / 2, 2 / x,
x % y, x % 2, 2 % x]
def test_column():
t = TableSymbol('t', '{name: string, amount: int}')
    assert t.fields == ['name', 'amount']
assert eval(str(t.name)) == t.name
assert str(t.name) == "t.name"
with pytest.raises(AttributeError):
t.name.balance
with pytest.raises((NotImplementedError, ValueError)):
getitem(t, set('balance'))
def test_symbol_projection_failures():
t = TableSymbol('t', '10 * {name: string, amount: int}')
with pytest.raises(ValueError):
t._project(['name', 'id'])
with pytest.raises(AttributeError):
t.foo
with pytest.raises(TypeError):
t._project(t.dshape)
def test_Projection():
t = TableSymbol('t', '{name: string, amount: int, id: int32}')
p = projection(t, ['amount', 'name'])
assert p.schema == dshape('{amount: int32, name: string}')
print(t['amount'].dshape)
print(dshape('var * int32'))
assert t['amount'].dshape == dshape('var * int32')
assert t['amount']._name == 'amount'
assert eval(str(p)).isidentical(p)
assert p._project(['amount','name']) == p[['amount','name']]
with pytest.raises(ValueError):
p._project('balance')
def test_Projection_retains_shape():
t = TableSymbol('t', '5 * {name: string, amount: int, id: int32}')
assert t[['name', 'amount']].dshape == \
dshape('5 * {name: string, amount: int}')
def test_indexing():
t = TableSymbol('t', '{name: string, amount: int, id: int}')
assert t[['amount', 'id']] == projection(t, ['amount', 'id'])
assert t['amount'].isidentical(Field(t, 'amount'))
def test_relational():
t = TableSymbol('t', '{name: string, amount: int, id: int}')
r = (t['name'] == 'Alice')
assert 'bool' in str(r.dshape)
assert r._name
def test_selection():
t = TableSymbol('t', '{name: string, amount: int, id: int}')
s = selection(t, t['name'] == 'Alice')
f = selection(t, t['id'] > t['amount'])
p = t[t['amount'] > 100]
with pytest.raises(ValueError):
selection(t, p)
assert s.dshape == t.dshape
def test_selection_typecheck():
t = TableSymbol('t', '{name: string, amount: int, id: int}')
assert raises(TypeError, lambda: t[t['amount'] + t['id']])
assert raises(TypeError, lambda: t[t['name']])
def test_selection_by_indexing():
t = TableSymbol('t', '{name: string, amount: int, id: int}')
result = t[t['name'] == 'Alice']
assert t.schema == result.schema
assert 'Alice' in str(result)
def test_selection_by_getattr():
t = TableSymbol('t', '{name: string, amount: int, id: int}')
result = t[t.name == 'Alice']
assert t.schema == result.schema
assert 'Alice' in str(result)
def test_selection_path_check():
t = TableSymbol('t', '{name: string, amount: int, id: int}')
t2 = t[t.name == 'Alice']
t3 = t2[t2.amount > 0]
assert t3
def test_path_issue():
from blaze.api.dplyr import transform
t = TableSymbol('t', "{ topic : string, word : string, result : ?float64}")
t2 = transform(t, sizes=t.result.map(lambda x: (x - MIN)*10/(MAX - MIN),
schema='float64', name='size'))
assert t2.sizes in t2.children
def test_different_schema_raises():
with tmpfile('.csv') as filename:
df = pd.DataFrame(np.random.randn(10, 2))
df.to_csv(filename, index=False, header=False)
with pytest.raises(TypeError):
Table(CSV(filename), columns=list('ab'))
def test_getattr_doesnt_override_properties():
t = TableSymbol('t', '{_subs: string, schema: string}')
assert callable(t._subs)
assert isinstance(t.schema, DataShape)
def test_dir_contains_columns():
t = TableSymbol('t', '{name: string, amount: int, id: int}')
result = dir(t)
columns_set = set(t.fields)
assert set(result) & columns_set == columns_set
def test_selection_consistent_children():
t = TableSymbol('t', '{name: string, amount: int, id: int}')
expr = t['name'][t['amount'] < 0]
assert list(expr.fields) == ['name']
def test_broadcast_syntax():
t = TableSymbol('t', '{x: real, y: real, z: real}')
x, y, z = t['x'], t['y'], t['z']
assert (x + y).active_columns() == ['x', 'y']
assert (z + y).active_columns() == ['y', 'z']
assert ((z + y) * x).active_columns() == ['x', 'y', 'z']
expr = (z % x * y + z ** 2 > 0) & (x < 0)
assert isinstance(expr, Broadcast)
def test_str():
import re
t = TableSymbol('t', '{name: string, amount: int, id: int}')
expr = t[t['amount'] < 0]['name'] * 2
assert '<class' not in str(expr)
assert not re.search('0x[0-9a-f]+', str(expr))
assert eval(str(expr)) == expr
assert '*' in repr(expr)
def test_join():
t = TableSymbol('t', '{name: string, amount: int}')
s = TableSymbol('t', '{name: string, id: int}')
r = TableSymbol('r', '{name: string, amount: int}')
q = TableSymbol('q', '{name: int}')
j = join(t, s, 'name', 'name')
assert j.schema == dshape('{name: string, amount: int, id: int}')
assert join(t, s, 'name') == join(t, s, 'name')
assert join(t, s, 'name').on_left == 'name'
assert join(t, s, 'name').on_right == 'name'
assert join(t, r, ('name', 'amount')).on_left == ['name', 'amount']
with pytest.raises(TypeError):
join(t, q, 'name')
with pytest.raises(ValueError):
join(t, s, how='upside_down')
def test_join_different_on_right_left_columns():
t = TableSymbol('t', '{x: int, y: int}')
s = TableSymbol('t', '{a: int, b: int}')
j = join(t, s, 'x', 'a')
assert j.on_left == 'x'
assert j.on_right == 'a'
def test_joined_column_first_in_schema():
t = TableSymbol('t', '{x: int, y: int, z: int}')
s = TableSymbol('s', '{w: int, y: int}')
assert join(t, s).schema == dshape('{y: int, x: int, z: int, w: int}')
def test_outer_join():
t = TableSymbol('t', '{name: string, amount: int}')
s = TableSymbol('t', '{name: string, id: int}')
jleft = join(t, s, 'name', 'name', how='left')
jright = join(t, s, 'name', 'name', how='right')
jinner = join(t, s, 'name', 'name', how='inner')
jouter = join(t, s, 'name', 'name', how='outer')
js = [jleft, jright, jinner, jouter]
assert len(set(js)) == 4 # not equal
assert jinner.schema == dshape('{name: string, amount: int, id: int}')
assert jleft.schema == dshape('{name: string, amount: int, id: ?int}')
assert jright.schema == dshape('{name: string, amount: ?int, id: int}')
assert jouter.schema == dshape('{name: string, amount: ?int, id: ?int}')
# Default behavior
assert join(t, s, 'name', 'name', how='inner') == \
join(t, s, 'name', 'name')
def test_join_default_shared_columns():
t = TableSymbol('t', '{name: string, amount: int}')
s = TableSymbol('t', '{name: string, id: int}')
assert join(t, s) == join(t, s, 'name', 'name')
def test_multi_column_join():
a = TableSymbol('a', '{x: int, y: int, z: int}')
b = TableSymbol('b', '{w: int, x: int, y: int}')
j = join(a, b, ['x', 'y'])
assert set(j.fields) == set('wxyz')
assert j.on_left == j.on_right == ['x', 'y']
assert hash(j)
assert j.fields == ['x', 'y', 'z', 'w']
def test_traverse():
t = TableSymbol('t', '{name: string, amount: int}')
assert t in list(t._traverse())
expr = t.amount.sum()
trav = list(expr._traverse())
assert builtins.any(t.amount.isidentical(x) for x in trav)
def test_unary_ops():
t = TableSymbol('t', '{name: string, amount: int}')
expr = cos(exp(t['amount']))
assert 'cos' in str(expr)
assert '~' in str(~(t.amount > 0))
def test_reduction():
t = TableSymbol('t', '{name: string, amount: int32}')
r = sum(t['amount'])
print(type(r.dshape))
print(type(dshape('int32')))
print(r.dshape)
assert r.dshape in (dshape('int32'),
dshape('{amount: int32}'),
dshape('{amount_sum: int32}'))
assert 'amount' not in str(t.count().dshape)
assert t.count().dshape[0] in (int32, int64)
assert 'int' in str(t.count().dshape)
assert 'int' in str(t.nunique().dshape)
assert 'string' in str(t['name'].max().dshape)
assert 'string' in str(t['name'].min().dshape)
assert 'string' not in str(t.count().dshape)
t = TableSymbol('t', '{name: string, amount: real, id: int}')
assert 'int' in str(t['id'].sum().dshape)
assert 'int' not in str(t['amount'].sum().dshape)
def test_reduction_name():
t = TableSymbol('t', '{name: string, amount: int32, id: int32}')
assert (t.amount + t.id).sum()._name
def test_max_min_class():
t = TableSymbol('t', '{name: string, amount: int32}')
assert str(max(t).dshape) == '{ name : string, amount : int32 }'
assert str(min(t).dshape) == '{ name : string, amount : int32 }'
@pytest.fixture
def symsum():
t = TableSymbol('t', '{name: string, amount: int32}')
return t, t.amount.sum()
@pytest.fixture
def ds():
return dshape("var * { "
"transaction_key : int64, "
"user_from_key : int64, "
"user_to_key : int64, "
"date : int64, "
"value : float64 "
"}")
def test_discover_dshape_symbol(ds):
t_ds = TableSymbol('t', dshape=ds)
assert t_ds.fields is not None
t_sch = TableSymbol('t', dshape=ds.subshape[0])
assert t_sch.fields is not None
assert t_ds.isidentical(t_sch)
class TestScalarArithmetic(object):
ops = {'+': add, '-': sub, '*': mul, '/': truediv, '//': floordiv, '%': mod,
'**': pow, '==': eq, '!=': ne, '<': lt, '>': gt, '<=': le, '>=': ge}
def test_scalar_arith(self, symsum):
def runner(f):
result = f(r, 1)
assert eval('r %s 1' % op).isidentical(result)
a = f(r, r)
b = eval('r %s r' % op)
assert a is b or a.isidentical(b)
result = f(1, r)
assert eval('1 %s r' % op).isidentical(result)
t, r = symsum
r = t.amount.sum()
for op, f in self.ops.items():
runner(f)
def test_scalar_usub(self, symsum):
t, r = symsum
result = -r
assert eval(str(result)).isidentical(result)
@pytest.mark.xfail
def test_scalar_uadd(self, symsum):
t, r = symsum
+r
def test_summary():
t = TableSymbol('t', '{id: int32, name: string, amount: int32}')
s = summary(total=t.amount.sum(), num=t.id.count())
assert s.dshape == dshape('{num: int32, total: int32}')
assert hash(s)
assert eval(str(s)).isidentical(s)
assert 'summary(' in str(s)
assert 'total=' in str(s)
assert 'num=' in str(s)
assert str(t.amount.sum()) in str(s)
assert not summary(total=t.amount.sum())._child.isidentical(
t.amount.sum())
assert iscollection(summary(total=t.amount.sum() + 1)._child.dshape)
def test_reduction_arithmetic():
t = TableSymbol('t', '{id: int32, name: string, amount: int32}')
expr = t.amount.sum() + 1
assert eval(str(expr)).isidentical(expr)
def test_Distinct():
t = TableSymbol('t', '{name: string, amount: int32}')
r = distinct(t['name'])
print(r.dshape)
assert r.dshape == dshape('var * string')
assert r._name == 'name'
r = t.distinct()
assert r.dshape == t.dshape
def test_by():
t = TableSymbol('t', '{name: string, amount: int32, id: int32}')
r = by(t['name'], sum(t['amount']))
print(r.schema)
assert isinstance(r.schema[0], Record)
assert str(r.schema[0]['name']) == 'string'
def test_by_summary():
t = TableSymbol('t', '{name: string, amount: int32, id: int32}')
a = by(t['name'], sum=sum(t['amount']))
b = by(t['name'], summary(sum=sum(t['amount'])))
assert a.isidentical(b)
def test_by_columns():
t = TableSymbol('t', '{name: string, amount: int32, id: int32}')
assert len(by(t['id'], t['amount'].sum()).fields) == 2
assert len(by(t['id'], t['id'].count()).fields) == 2
print(by(t, t.count()).fields)
assert len(by(t, t.count()).fields) == 4
def test_sort():
t = TableSymbol('t', '{name: string, amount: int32, id: int32}')
s = t.sort('amount', ascending=True)
print(str(s))
assert eval(str(s)).isidentical(s)
assert s.schema == t.schema
assert t['amount'].sort().key == 'amount'
def test_head():
t = TableSymbol('t', '{name: string, amount: int32, id: int32}')
s = t.head(10)
assert eval(str(s)).isidentical(s)
assert s.schema == t.schema
def test_label():
t = TableSymbol('t', '{name: string, amount: int32, id: int32}')
quantity = (t['amount'] + 100).label('quantity')
assert eval(str(quantity)).isidentical(quantity)
assert quantity.fields == ['quantity']
with pytest.raises(ValueError):
quantity['balance']
def test_map_label():
t = TableSymbol('t', '{name: string, amount: int32, id: int32}')
c = t.amount.map(identity, schema='int32')
assert c.label('bar')._name == 'bar'
assert c.label('bar')._child.isidentical(c._child)
def test_columns():
t = TableSymbol('t', '{name: string, amount: int32, id: int32}')
assert list(t.fields) == ['name', 'amount', 'id']
assert list(t['name'].fields) == ['name']
(t['amount'] + 1).fields
def test_relabel():
t = TableSymbol('t', '{name: string, amount: int32, id: int32}')
rl = t.relabel({'name': 'NAME', 'id': 'ID'})
rlc = t['amount'].relabel({'amount': 'BALANCE'})
assert eval(str(rl)).isidentical(rl)
print(rl.fields)
assert rl.fields == ['NAME', 'amount', 'ID']
assert not isscalar(rl.dshape.measure)
assert isscalar(rlc.dshape.measure)
def test_relabel_join():
names = TableSymbol('names', '{first: string, last: string}')
siblings = join(names.relabel({'last': 'left'}),
names.relabel({'last': 'right'}), 'first')
assert siblings.fields == ['first', 'left', 'right']
def test_map():
t = TableSymbol('t', '{name: string, amount: int32, id: int32}')
inc = lambda x: x + 1
assert isscalar(t['amount'].map(inc, schema='int').dshape.measure)
s = t['amount'].map(inc, schema='{amount: int}')
assert not isscalar(s.dshape.measure)
assert s.dshape == dshape('var * {amount: int}')
expr = (t[['name', 'amount']]
.map(identity, schema='{name: string, amount: int}'))
assert expr._name is None
@pytest.mark.xfail(reason="Not sure that we should even support this")
def test_map_without_any_info():
t = TableSymbol('t', '{name: string, amount: int32, id: int32}')
assert iscolumn(t['amount'].map(inc))
assert not iscolumn(t[['name', 'amount']].map(identity))
def test_apply():
t = TableSymbol('t', '{name: string, amount: int32, id: int32}')
s = Apply(t['amount'], sum, dshape='real')
r = Apply(t['amount'], sum, dshape='3 * real')
l = Apply(t['amount'], sum)
assert s.dshape == dshape('real')
assert r.schema == dshape("float64")
with pytest.raises(TypeError):
s.schema
with pytest.raises(NotImplementedError):
l.dshape
def test_broadcast():
from blaze.expr.arithmetic import Add, Eq, Mult, Le
t = TableSymbol('t', '{x: int, y: int, z: int}')
t2 = TableSymbol('t', '{a: int, b: int, c: int}')
x = t['x']
y = t['y']
z = t['z']
a = t2['a']
b = t2['b']
c = t2['c']
assert str(broadcast(Add, x, y)._expr) == 'x + y'
assert broadcast(Add, x, y)._child.isidentical(t)
c1 = broadcast(Add, x, y)
c2 = broadcast(Mult, x, z)
assert eval_str(broadcast(Eq, c1, c2)._expr) == '(x + y) == (x * z)'
assert broadcast(Eq, c1, c2)._child.isidentical(t)
assert str(broadcast(Add, x, 1)._expr) == 'x + 1'
assert str(x <= y) == "t.x <= t.y"
assert str(x >= y) == "t.x >= t.y"
assert str(x | y) == "t.x | t.y"
assert str(x.__ror__(y)) == "t.y | t.x"
assert str(x.__rand__(y)) == "t.y & t.x"
with pytest.raises(ValueError):
broadcast(Add, x, a)
def test_expr_child():
t = TableSymbol('t', '{x: int, y: int, z: int}')
w = t['x'].label('w')
assert str(_expr_child(w)) == '(x, t)'
def test_TableSymbol_printing_is_legible():
accounts = TableSymbol('accounts', '{name: string, balance: int, id: int}')
expr = (exp(accounts.balance * 10)) + accounts['id']
assert "exp(accounts.balance * 10)" in str(expr)
assert "+ accounts.id" in str(expr)
def test_merge():
t = TableSymbol('t', 'int64')
p = TableSymbol('p', '{amount:int}')
accounts = TableSymbol('accounts',
'{name: string, balance: int32, id: int32}')
new_amount = (accounts.balance * 1.5).label('new')
c = merge(accounts[['name', 'balance']], new_amount)
assert c.fields == ['name', 'balance', 'new']
assert c.schema == dshape('{name: string, balance: int32, new: float64}')
with pytest.raises(ValueError):
merge(t, t)
with pytest.raises(ValueError):
merge(t, p)
def test_merge_repeats():
accounts = TableSymbol('accounts',
'{name: string, balance: int32, id: int32}')
with pytest.raises(ValueError):
merge(accounts, (accounts.balance + 1).label('balance'))
def test_merge_project():
accounts = TableSymbol('accounts',
'{name: string, balance: int32, id: int32}')
new_amount = (accounts['balance'] * 1.5).label('new')
c = merge(accounts[['name', 'balance']], new_amount)
assert c['new'].isidentical(new_amount)
assert c['name'].isidentical(accounts['name'])
assert c[['name', 'new']].isidentical(merge(accounts.name, new_amount))
inc = lambda x: x + 1
def test_subterms():
a = TableSymbol('a', '{x: int, y: int, z: int}')
assert list(a._subterms()) == [a]
assert set(a['x']._subterms()) == set([a, a['x']])
assert set(a['x'].map(inc)._subterms()) == set([a, a['x'], a['x'].map(inc)])
assert a in set((a['x'] + 1)._subterms())
def test_common_subexpression():
a = TableSymbol('a', '{x: int, y: int, z: int}')
assert common_subexpression(a).isidentical(a)
assert common_subexpression(a, a['x']).isidentical(a)
assert common_subexpression(a['y'] + 1, a['x']).isidentical(a)
assert common_subexpression(a['x'].map(inc), a['x']).isidentical(a['x'])
def test_schema_of_complex_interaction():
a = TableSymbol('a', '{x: int, y: int, z: int}')
expr = (a['x'] + a['y']) / a['z']
assert expr.schema == dshape('real')
expr = expr.label('foo')
assert expr.schema == dshape('real')
def iscolumn(x):
return isscalar(x.dshape.measure)
def test_iscolumn():
a = TableSymbol('a', '{x: int, y: int, z: int}')
assert not iscolumn(a)
assert iscolumn(a['x'])
assert not iscolumn(a[['x', 'y']])
assert not iscolumn(a[['x']])
assert iscolumn((a['x'] + a['y']))
assert iscolumn(a['x'].distinct())
assert not iscolumn(a[['x']].distinct())
assert not iscolumn(by(a['x'], a['y'].sum()))
assert iscolumn(a['x'][a['x'] > 1])
assert not iscolumn(a[['x', 'y']][a['x'] > 1])
assert iscolumn(a['x'].sort())
assert not iscolumn(a[['x', 'y']].sort())
assert iscolumn(a['x'].head())
assert not iscolumn(a[['x', 'y']].head())
assert iscolumn(TableSymbol('b', 'int'))
assert not iscolumn(TableSymbol('b', '{x: int}'))
def test_discover():
schema = '{x: int, y: int, z: int}'
a = TableSymbol('a', schema)
assert discover(a) == var * schema
def test_improper_selection():
t = TableSymbol('t', '{x: int, y: int, z: int}')
assert raises(Exception, lambda: t[t['x'] > 0][t.sort()[t['y' > 0]]])
def test_union():
schema = '{x: int, y: int, z: int}'
a = TableSymbol('a', schema)
b = TableSymbol('b', schema)
c = TableSymbol('c', schema)
u = union(a, b, c)
assert u.schema == a.schema
assert raises(Exception,
lambda: union(a, TableSymbol('q', '{name: string}')))
def test_serializable():
t = TableSymbol('t', '{id: int, name: string, amount: int}')
import pickle
t2 = pickle.loads(pickle.dumps(t))
assert t.isidentical(t2)
s = TableSymbol('t', '{id: int, city: string}')
expr = join(t[t.amount < 0], s).sort('id').city.head()
expr2 = pickle.loads(pickle.dumps(expr))
assert expr.isidentical(expr2)
def test_table_coercion():
from datetime import date
t = TableSymbol('t', '{name: string, amount: int, timestamp: ?date}')
assert (t.amount + '10')._expr.rhs == 10
assert (t.timestamp < '2014-12-01')._expr.rhs == date(2014, 12, 1)
def test_isnan():
from blaze import isnan
t = TableSymbol('t', '{name: string, amount: real, timestamp: ?date}')
for expr in [t.amount.isnan(), ~t.amount.isnan()]:
assert eval(str(expr)).isidentical(expr)
assert iscollection(t.amount.isnan().dshape)
assert 'bool' in str(t.amount.isnan().dshape)
def test_broadcast_naming():
t = TableSymbol('t', '{x: int, y: int, z: int}')
assert t.x._name == 'x'
assert (t.x + 1)._name == 'x'
def test_scalar_expr():
t = TableSymbol('t', '{x: int64, y: int32, z: int64}')
x = t.x._expr
y = t.y._expr
assert 'int64' in str(x.dshape)
assert 'int32' in str(y.dshape)
expr = (t.x + 1)._expr
assert expr._inputs[0].dshape == x.dshape
assert expr._inputs[0].isidentical(x)
t = TableSymbol('t', '{ amount : int64, id : int64, name : string }')
expr = (t.amount + 1)._expr
assert 'int64' in str(expr._inputs[0].dshape)
def test_distinct_name():
t = TableSymbol('t', '{id: int32, name: string}')
assert t.name.isidentical(t['name'])
assert t.distinct().name.isidentical(t.distinct()['name'])
assert t.id.distinct()._name == 'id'
assert t.name._name == 'name'
def test_leaves():
t = TableSymbol('t', '{id: int32, name: string}')
v = TableSymbol('v', '{id: int32, city: string}')
x = Symbol('x', 'int32')
assert t._leaves() == [t]
assert t.id._leaves() == [t]
assert by(t.name, t.id.nunique())._leaves() == [t]
assert join(t, v)._leaves() == [t, v]
assert join(v, t)._leaves() == [v, t]
assert (x + 1)._leaves() == [x]
@pytest.fixture
def t():
return TableSymbol('t', '{id: int, amount: float64, name: string}')
def funcname(x, y='<lambda>'):
if PY3:
return 'TestRepr.%s.<locals>.%s' % (x, y)
return 'test_table.%s' % y
class TestRepr(object):
def test_partial_lambda(self, t):
expr = t.amount.map(partial(lambda x, y: x + y, 1))
s = str(expr)
assert s == ("Map(_child=t.amount, "
"func=partial(%s, 1), "
"_schema=None, _name0=None)" %
funcname('test_partial_lambda'))
def test_lambda(self, t):
expr = t.amount.map(lambda x: x)
s = str(expr)
assert s == ("Map(_child=t.amount, "
"func=%s, _schema=None, _name0=None)" %
funcname('test_lambda'))
def test_partial(self, t):
def myfunc(x, y):
return x + y
expr = t.amount.map(partial(myfunc, 1))
s = str(expr)
assert s == ("Map(_child=t.amount, "
"func=partial(%s, 1), "
"_schema=None, _name0=None)" % funcname('test_partial',
'myfunc'))
def test_builtin(self, t):
expr = t.amount.map(datetime.fromtimestamp)
s = str(expr)
assert s == ("Map(_child=t.amount, "
"func=datetime.fromtimestamp, _schema=None,"
" _name0=None)")
def test_udf(self, t):
def myfunc(x):
return x + 1
expr = t.amount.map(myfunc)
s = str(expr)
assert s == ("Map(_child=t.amount, "
"func=%s, _schema=None,"
" _name0=None)" % funcname('test_udf', 'myfunc'))
def test_nested_partial(self, t):
def myfunc(x, y, z):
return x + y + z
f = partial(partial(myfunc, 2), 1)
expr = t.amount.map(f)
s = str(expr)
assert s == ("Map(_child=t.amount, func=partial(partial(%s, 2), 1),"
" _schema=None, _name0=None)" %
funcname('test_nested_partial', 'myfunc'))
def test_count_values():
t = TableSymbol('t', '{name: string, amount: int, city: string}')
assert t.name.count_values(sort=False).isidentical(
by(t.name, count=t.name.count()))
assert t.name.count_values(sort=True).isidentical(
by(t.name, count=t.name.count()).sort('count', ascending=False))
def test_dir():
t = TableSymbol('t', '{name: string, amount: int, dt: datetime}')
assert 'day' in dir(t.dt)
assert 'mean' not in dir(t.dt)
assert 'mean' in dir(t.amount)
assert 'like' not in dir(t[['amount', 'dt']])
assert 'any' not in dir(t.name)
def test_distinct_column():
t = TableSymbol('t', '{name: string, amount: int, dt: datetime}')
assert t.name.distinct().name.dshape == t.name.distinct().dshape
assert t.name.distinct().name.isidentical(t.name.distinct())
def test_columns_attribute_for_backwards_compatibility():
t = TableSymbol('t', '{name: string, amount: int, dt: datetime}')
assert t.columns == t.fields
assert 'columns' in dir(t)
assert 'columns' not in dir(t.name)
| {
"repo_name": "vitan/blaze",
"path": "blaze/expr/tests/test_table.py",
"copies": "1",
"size": "28416",
"license": "bsd-3-clause",
"hash": 2622104873757055000,
"line_mean": 28.9746835443,
"line_max": 80,
"alpha_frac": 0.5652801239,
"autogenerated": false,
"ratio": 3.137116361227644,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9198205770069081,
"avg_score": 0.0008381430117126049,
"num_lines": 948
} |
from __future__ import absolute_import, division, print_function
import pytest
import platform
pytestmark = pytest.mark.skipif('windows' in platform.platform().lower(),
reason='No Mongo support on Windows.')
pymongo = pytest.importorskip('pymongo')
from datetime import datetime
from toolz import pluck, reduceby, groupby
from datashape import Record
from blaze import into, compute, compute_up, discover, dshape, Data
from blaze.compute.mongo import MongoQuery
from blaze.expr import symbol, by, floor, ceil
from blaze.compatibility import xfail
@pytest.fixture(scope='module')
def conn():
try:
return pymongo.MongoClient()
except pymongo.errors.ConnectionFailure:
pytest.skip('No mongo server running')
@pytest.fixture(scope='module')
def db(conn):
return conn.test_db
bank_raw = [{'name': 'Alice', 'amount': 100},
{'name': 'Alice', 'amount': 200},
{'name': 'Bob', 'amount': 100},
{'name': 'Bob', 'amount': 200},
{'name': 'Bob', 'amount': 300}]
@pytest.yield_fixture
def big_bank(db):
data = [{'name': 'Alice', 'amount': 100, 'city': 'New York City'},
{'name': 'Alice', 'amount': 200, 'city': 'Austin'},
{'name': 'Bob', 'amount': 100, 'city': 'New York City'},
{'name': 'Bob', 'amount': 200, 'city': 'New York City'},
{'name': 'Bob', 'amount': 300, 'city': 'San Francisco'}]
coll = db.bigbank
coll = into(coll, data)
try:
yield coll
finally:
coll.drop()
@pytest.yield_fixture
def date_data(db):
n = 3
d = {'name': ['Alice', 'Bob', 'Joe'],
'when': [datetime(2010, 1, 1, i) for i in [1, 2, 3]],
'amount': [100, 200, 300],
'id': [1, 2, 3]}
data = [dict(zip(d.keys(), [d[k][i] for k in d.keys()]))
for i in range(n)]
coll = into(db.date_data, data)
try:
yield coll
finally:
coll.drop()
@pytest.yield_fixture
def bank(db):
coll = db.bank
coll = into(coll, bank_raw)
try:
yield coll
finally:
coll.drop()
@pytest.yield_fixture
def missing_vals(db):
data = [{'x': 1, 'z': 100},
{'x': 2, 'y': 20, 'z': 200},
{'x': 3, 'z': 300},
{'x': 4, 'y': 40}]
coll = db.missing_vals
coll = into(coll, data)
try:
yield coll
finally:
coll.drop()
@pytest.yield_fixture
def points(db):
data = [{'x': 1, 'y': 10, 'z': 100},
{'x': 2, 'y': 20, 'z': 200},
{'x': 3, 'y': 30, 'z': 300},
{'x': 4, 'y': 40, 'z': 400}]
coll = db.points
coll = into(coll, data)
try:
yield coll
finally:
coll.drop()
@pytest.yield_fixture
def events(db):
data = [{'time': datetime(2012, 1, 1, 12, 00, 00), 'x': 1},
{'time': datetime(2012, 1, 2, 12, 00, 00), 'x': 2},
{'time': datetime(2012, 1, 3, 12, 00, 00), 'x': 3}]
coll = db.events
coll = into(coll, data)
try:
yield coll
finally:
coll.drop()
t = symbol('t', 'var * {name: string, amount: int}')
bigt = symbol('bigt', 'var * {name: string, amount: int, city: string}')
p = symbol('p', 'var * {x: int, y: int, z: int}')
e = symbol('e', 'var * {time: datetime, x: int}')
q = MongoQuery('fake', [])
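# q is a placeholder query object: the compute_up tests below only inspect the
# aggregation steps it accumulates (its .query attribute), so they need no
# running MongoDB server.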
def test_compute_on_db(bank, points):
assert bank.database == points.database
db = bank.database
d = symbol(db.name, discover(db))
assert (compute(d.points.x.sum(), db) ==
sum(x['x'] for x in db.points.find()))
def test_symbol(bank):
assert compute(t, bank) == list(pluck(['name', 'amount'], bank_raw))
def test_projection_one():
assert compute_up(t[['name']], q).query == ({'$project': {'name': 1}},)
def test_head_one():
assert compute_up(t.head(5), q).query == ({'$limit': 5},)
def test_head(bank):
assert len(compute(t.head(2), bank)) == 2
def test_projection(bank):
assert set(compute(t.name, bank)) == set(['Alice', 'Bob'])
assert set(compute(t[['name']], bank)) == set([('Alice',), ('Bob',)])
def test_selection(bank):
assert set(compute(t[t.name == 'Alice'], bank)) == set([('Alice', 100),
('Alice', 200)])
assert set(compute(t['Alice' == t.name], bank)) == set([('Alice', 100),
('Alice', 200)])
assert set(compute(t[t.amount > 200], bank)) == set([('Bob', 300)])
assert set(compute(t[t.amount >= 200], bank)) == set([('Bob', 300),
('Bob', 200),
('Alice', 200)])
assert set(compute(t[t.name != 'Alice'].name, bank)) == set(['Bob'])
assert set(compute(t[(t.name == 'Alice') & (t.amount > 150)], bank)) == \
set([('Alice', 200)])
assert set(compute(t[(t.name == 'Alice') | (t.amount > 250)], bank)) == \
set([('Alice', 200),
('Alice', 100),
('Bob', 300)])
def test_columnwise(points):
assert set(compute(p.x + p.y, points)) == set([11, 22, 33, 44])
def test_columnwise_multiple_operands(points):
expected = [x['x'] + x['y'] - x['z'] * x['x'] / 2 for x in points.find()]
assert set(compute(p.x + p.y - p.z * p.x / 2, points)) == set(expected)
def test_arithmetic(points):
expr = p.y // p.x
assert set(compute(expr, points)) == set(compute(expr, points.find()))
def test_columnwise_mod(points):
expected = [x['x'] % x['y'] - x['z'] * x['x'] / 2 + 1
for x in points.find()]
expr = p.x % p.y - p.z * p.x / 2 + 1
assert set(compute(expr, points)) == set(expected)
@xfail(raises=NotImplementedError,
reason='MongoDB does not implement certain arith ops')
def test_columnwise_pow(points):
expected = [x['x'] ** x['y'] for x in points.find()]
assert set(compute(p.x ** p.y, points)) == set(expected)
def test_by_one():
assert compute_up(by(t.name, total=t.amount.sum()), q).query == \
({'$group': {'_id': {'name': '$name'},
'total': {'$sum': '$amount'}}},
{'$project': {'total': '$total', 'name': '$_id.name'}})
def test_by(bank):
assert set(compute(by(t.name, total=t.amount.sum()), bank)) == \
set([('Alice', 300), ('Bob', 600)])
assert set(compute(by(t.name, min=t.amount.min()), bank)) == \
set([('Alice', 100), ('Bob', 100)])
assert set(compute(by(t.name, max=t.amount.max()), bank)) == \
set([('Alice', 200), ('Bob', 300)])
assert set(compute(by(t.name, count=t.name.count()), bank)) == \
set([('Alice', 2), ('Bob', 3)])
def test_reductions(bank):
assert compute(t.amount.min(), bank) == 100
assert compute(t.amount.max(), bank) == 300
assert compute(t.amount.sum(), bank) == 900
def test_distinct(bank):
assert set(compute(t.name.distinct(), bank)) == set(['Alice', 'Bob'])
def test_nunique_collection(bank):
assert compute(t.nunique(), bank) == len(bank_raw)
def test_sort(bank):
assert compute(t.amount.sort('amount'), bank) == \
[100, 100, 200, 200, 300]
assert compute(t.amount.sort('amount', ascending=False), bank) == \
[300, 200, 200, 100, 100]
def test_by_multi_column(bank):
assert set(compute(by(t[['name', 'amount']], count=t.count()), bank)) == \
set([(d['name'], d['amount'], 1) for d in bank_raw])
def test_datetime_handling(events):
assert set(compute(e[e.time >= datetime(2012, 1, 2, 12, 0, 0)].x,
events)) == set([2, 3])
assert set(compute(e[e.time >= "2012-01-02"].x,
events)) == set([2, 3])
def test_summary_kwargs(bank):
expr = by(t.name, total=t.amount.sum(), avg=t.amount.mean())
result = compute(expr, bank)
assert result == [('Bob', 200.0, 600), ('Alice', 150.0, 300)]
def test_summary_count(bank):
expr = by(t.name, how_many=t.amount.count())
result = compute(expr, bank)
assert result == [('Bob', 3), ('Alice', 2)]
def test_summary_arith(bank):
expr = by(t.name, add_one_and_sum=(t.amount + 1).sum())
result = compute(expr, bank)
assert result == [('Bob', 603), ('Alice', 302)]
def test_summary_arith_min(bank):
expr = by(t.name, add_one_and_sum=(t.amount + 1).min())
result = compute(expr, bank)
assert result == [('Bob', 101), ('Alice', 101)]
def test_summary_arith_max(bank):
expr = by(t.name, add_one_and_sum=(t.amount + 1).max())
result = compute(expr, bank)
assert result == [('Bob', 301), ('Alice', 201)]
def test_summary_complex_arith(bank):
expr = by(t.name, arith=(100 - t.amount * 2 / 30.0).sum())
result = compute(expr, bank)
reducer = lambda acc, x: (100 - x['amount'] * 2 / 30.0) + acc
expected = reduceby('name', reducer, bank.find(), 0)
assert set(result) == set(expected.items())
def test_summary_complex_arith_multiple(bank):
expr = by(t.name, arith=(100 - t.amount * 2 / 30.0).sum(),
other=t.amount.mean())
result = compute(expr, bank)
reducer = lambda acc, x: (100 - x['amount'] * 2 / 30.0) + acc
expected = reduceby('name', reducer, bank.find(), 0)
mu = reduceby('name', lambda acc, x: acc + x['amount'], bank.find(), 0.0)
values = list(mu.values())
items = expected.items()
counts = groupby('name', bank.find())
items = [x + (float(v) / len(counts[x[0]]),)
for x, v in zip(items, values)]
assert set(result) == set(items)
def test_like(bank):
bank.create_index([('name', pymongo.TEXT)])
expr = t[t.name.like('*Alice*')]
result = compute(expr, bank)
assert set(result) == set((('Alice', 100), ('Alice', 200)))
def test_like_multiple(big_bank):
expr = bigt[bigt.name.like('*Bob*') & bigt.city.like('*York*')]
result = compute(expr, big_bank)
assert set(result) == set(
(('Bob', 100, 'New York City'), ('Bob', 200, 'New York City'))
)
def test_like_multiple_no_match(big_bank):
# make sure we aren't OR-ing the matches
expr = bigt[bigt.name.like('*York*') & bigt.city.like('*Bob*')]
assert not set(compute(expr, big_bank))
def test_missing_values(missing_vals):
assert discover(missing_vals).subshape[0] == \
dshape('{x: int64, y: ?int64, z: ?int64}')
assert set(compute(p.y, missing_vals)) == set([None, 20, None, 40])
def test_datetime_access(date_data):
t = symbol('t',
'var * {amount: float64, id: int64, name: string, when: datetime}')
py_data = into(list, date_data) # a python version of the collection
for attr in ['day', 'minute', 'second', 'year', 'month']:
assert list(compute(getattr(t.when, attr), date_data)) == \
list(compute(getattr(t.when, attr), py_data))
def test_datetime_access_and_arithmetic(date_data):
t = symbol('t',
'var * {amount: float64, id: int64, name: string, when: datetime}')
py_data = into(list, date_data) # a python version of the collection
expr = t.when.day + t.id
assert list(compute(expr, date_data)) == list(compute(expr, py_data))
def test_floor_ceil(bank):
t = symbol('t', discover(bank))
assert set(compute(200 * floor(t.amount / 200), bank)) == set([0, 200])
assert set(compute(200 * ceil(t.amount / 200), bank)) == set([200, 400])
def test_Data_construct(bank, points):
d = Data('mongodb://localhost/test_db')
assert 'bank' in d.fields
assert 'points' in d.fields
assert isinstance(d.dshape.measure, Record)
def test_Data_construct_with_table(bank):
d = Data('mongodb://localhost/test_db::bank')
assert set(d.fields) == set(('name', 'amount'))
assert int(d.count()) == 5
def test_and_same_key(bank):
expr = t[(t.amount > 100) & (t.amount < 300)]
result = compute(expr, bank)
expected = [('Alice', 200), ('Bob', 200)]
assert result == expected
def test_interactive_dshape_works():
try:
d = Data('mongodb://localhost:27017/test_db::bank',
dshape='var * {name: string, amount: int64}')
except pymongo.errors.ConnectionFailure:
pytest.skip('No mongo server running')
assert d.dshape == dshape('var * {name: string, amount: int64}')
@pytest.mark.xfail(raises=TypeError, reason="IsIn not yet implemented")
def test_isin_fails(bank):
expr = t[t.amount.isin([100])]
result = compute(expr, bank)
assert result == compute(t[t.amount == 100], bank)
| {
"repo_name": "cpcloud/blaze",
"path": "blaze/compute/tests/test_mongo_compute.py",
"copies": "2",
"size": "12480",
"license": "bsd-3-clause",
"hash": -7574368221058348000,
"line_mean": 30.1221945137,
"line_max": 82,
"alpha_frac": 0.5564903846,
"autogenerated": false,
"ratio": 3.1901840490797544,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9743793817678789,
"avg_score": 0.0005761232001932792,
"num_lines": 401
} |
from __future__ import absolute_import, division, print_function
import pytest
import requests_mock
from appr.client import DEFAULT_PREFIX
from appr.commands.inspect import InspectCmd
def get_inspectcmd(cli_parser, args=[]):
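    # Parse the arguments exactly as the 'appr inspect' CLI entry point would
    # and return the resulting command object for the test to drive.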
options = cli_parser.parse_args(["inspect"] + args)
return InspectCmd(options)
def test_inspect_init(cli_parser):
inspectcmd = get_inspectcmd(cli_parser, ["kpm.sh/foo/bar", "-t", "helm"])
assert inspectcmd.version == "default"
assert inspectcmd.registry_host == "kpm.sh"
assert InspectCmd.name == "inspect"
def test_inspect_tree(cli_parser, package_blob, capsys):
inspectcmd = get_inspectcmd(cli_parser, ["kpm.sh/foo/bar@1.0.0", "-t", "helm", "--tree"])
with requests_mock.mock() as m:
response = package_blob
m.get("https://kpm.sh" + DEFAULT_PREFIX + "/api/v1/packages/foo/bar/1.0.0/helm/pull", content=response)
inspectcmd.exec_cmd()
out, err = capsys.readouterr()
default_out = ["README.md", "manifest.yaml", "templates/rocketchat-rc.yml", "templates/rocketchat-svc.yml\n"]
default_out.sort()
assert out == "\n".join(default_out)
def test_inspect_default(cli_parser, package_blob, capsys):
""" Default is the tree view """
inspectcmd = get_inspectcmd(cli_parser, ["kpm.sh/foo/bar@1.0.0", "-t", "helm", "--tree"])
inspectcmd_default_file = get_inspectcmd(cli_parser, ["kpm.sh/foo/bar@1.0.0", "-t", "helm"])
with requests_mock.mock() as m:
response = package_blob
m.get("https://kpm.sh" + DEFAULT_PREFIX + "/api/v1/packages/foo/bar/1.0.0/helm/pull", content=response)
inspectcmd.exec_cmd()
out, err = capsys.readouterr()
inspectcmd_default_file.exec_cmd()
default_out, default_err = capsys.readouterr()
assert out == default_out
def test_inspect_file(cli_parser, package_blob, capsys):
inspectcmd = get_inspectcmd(cli_parser, ["kpm.sh/foo/bar@1.0.0", "-t", "helm", "--file", "README.md"])
with requests_mock.mock() as m:
response = package_blob
m.get("https://kpm.sh" + DEFAULT_PREFIX + "/api/v1/packages/foo/bar/1.0.0/helm/pull", content=response)
inspectcmd.exec_cmd()
out, err = capsys.readouterr()
readme = "\nrocketchat\n===========\n\n# Install\n\nkpm install rocketchat\n\n"
assert out == readme + "\n"
assert inspectcmd._render_dict() == {'inspect': 'foo/bar', 'output': readme}
| {
"repo_name": "app-registry/appr",
"path": "tests/commands/test_inspect.py",
"copies": "2",
"size": "2449",
"license": "apache-2.0",
"hash": -6173246785865911000,
"line_mean": 41.9649122807,
"line_max": 117,
"alpha_frac": 0.6427113107,
"autogenerated": false,
"ratio": 3.0727728983688833,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47154842090688837,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import pytest
import requests
import requests_mock
import appr
from appr.client import DEFAULT_PREFIX, DEFAULT_REGISTRY, ApprClient
from appr.commands.version import VersionCmd
def get_versioncmd(cli_parser, args=[]):
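    # Build a VersionCmd against the default registry, as the 'appr version'
    # CLI entry point would.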
options = cli_parser.parse_args(["version", DEFAULT_REGISTRY] + args)
return VersionCmd(options)
def test_version_registry_host(cli_parser):
versioncmd = get_versioncmd(cli_parser)
assert versioncmd.registry_host == DEFAULT_REGISTRY
def test_version_init(cli_parser):
versioncmd = get_versioncmd(cli_parser)
assert versioncmd.api_version is None
assert versioncmd.registry_host == "http://localhost:5000"
assert VersionCmd.name == "version"
def test_get_version(cli_parser, capsys):
versioncmd = get_versioncmd(cli_parser)
response = '{"appr-server": "0.23.0"}'
with requests_mock.mock() as m:
m.get(DEFAULT_REGISTRY + DEFAULT_PREFIX + "/version",
complete_qs=True,
text=response)
versioncmd.exec_cmd()
out, err = capsys.readouterr()
assert out == "Api-version: {u'appr-server': u'0.23.0'}\nClient-version: %s\n""" % appr.__version__
assert versioncmd._render_dict() == {'api-version': {u'appr-server': u'0.23.0'}, 'client-version': appr.__version__}
assert versioncmd.api_version == {u'appr-server': u'0.23.0'}
def test_get_version_api_error(cli_parser, capsys):
versioncmd = get_versioncmd(cli_parser)
response = '{"appr-server": "0.23.0"}'
with requests_mock.mock() as m:
m.get(DEFAULT_REGISTRY + DEFAULT_PREFIX + "/version",
complete_qs=True,
text=response, status_code=500)
versioncmd.exec_cmd()
out, err = capsys.readouterr()
assert out == "Api-version: .. Connection error\nClient-version: %s\n""" % appr.__version__
assert versioncmd._render_dict() == {'api-version': ".. Connection error", 'client-version': appr.__version__}
| {
"repo_name": "app-registry/appr",
"path": "tests/commands/test_version.py",
"copies": "2",
"size": "2030",
"license": "apache-2.0",
"hash": 6998490971505671000,
"line_mean": 37.3018867925,
"line_max": 124,
"alpha_frac": 0.660591133,
"autogenerated": false,
"ratio": 3.4003350083752095,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.000672879634033907,
"num_lines": 53
} |
from __future__ import absolute_import, division, print_function
import pytest
import six
import drms
from drms.config import ServerConfig, register_server, _server_configs
def test_create_config_basic():
cfg = ServerConfig(name='TEST')
valid_keys = ServerConfig._valid_keys
assert 'name' in valid_keys
assert 'encoding' in valid_keys
for k in valid_keys:
v = getattr(cfg, k)
if k == 'name':
assert v == 'TEST'
elif k == 'encoding':
assert v == 'latin1'
else:
assert v is None
def test_create_config_missing_name():
with pytest.raises(ValueError):
cfg = ServerConfig()
def test_copy_config():
cfg = ServerConfig(name='TEST')
assert cfg.name == 'TEST'
cfg2 = cfg.copy()
assert cfg2 is not cfg
assert cfg2.name == 'TEST'
cfg.name = 'MUH'
assert cfg.name != cfg2.name
def test_register_server():
cfg = ServerConfig(name='TEST')
assert 'test' not in _server_configs
register_server(cfg)
assert 'test' in _server_configs
del _server_configs['test']
assert 'test' not in _server_configs
def test_register_server_existing():
assert 'jsoc' in _server_configs
cfg = ServerConfig(name='jsoc')
with pytest.raises(RuntimeError):
register_server(cfg)
assert 'jsoc' in _server_configs
def test_config_jsoc():
assert 'jsoc' in _server_configs
cfg = _server_configs['jsoc']
assert cfg.name.lower() == 'jsoc'
assert isinstance(cfg.encoding, six.string_types)
assert isinstance(cfg.cgi_show_series, six.string_types)
assert isinstance(cfg.cgi_jsoc_info, six.string_types)
assert isinstance(cfg.cgi_jsoc_fetch, six.string_types)
assert isinstance(cfg.cgi_check_address, six.string_types)
assert isinstance(cfg.cgi_show_series_wrapper, six.string_types)
assert isinstance(cfg.show_series_wrapper_dbhost, six.string_types)
assert cfg.http_download_baseurl.startswith('http://')
assert cfg.ftp_download_baseurl.startswith('ftp://')
baseurl = cfg.cgi_baseurl
assert baseurl.startswith('http://')
assert cfg.url_show_series.startswith(baseurl)
assert cfg.url_jsoc_info.startswith(baseurl)
assert cfg.url_jsoc_fetch.startswith(baseurl)
assert cfg.url_check_address.startswith(baseurl)
assert cfg.url_show_series_wrapper.startswith(baseurl)
def test_config_kis():
assert 'kis' in _server_configs
cfg = _server_configs['kis']
assert cfg.name.lower() == 'kis'
assert isinstance(cfg.encoding, six.string_types)
assert isinstance(cfg.cgi_show_series, six.string_types)
assert isinstance(cfg.cgi_jsoc_info, six.string_types)
assert cfg.cgi_jsoc_fetch is None
assert cfg.cgi_check_address is None
assert cfg.cgi_show_series_wrapper is None
assert cfg.show_series_wrapper_dbhost is None
assert cfg.http_download_baseurl is None
assert cfg.ftp_download_baseurl is None
baseurl = cfg.cgi_baseurl
assert baseurl.startswith('http://')
assert cfg.url_show_series.startswith(baseurl)
assert cfg.url_jsoc_info.startswith(baseurl)
assert cfg.url_jsoc_fetch is None
assert cfg.url_check_address is None
assert cfg.url_show_series_wrapper is None
@pytest.mark.parametrize('server_name, operation, expected', [
('jsoc', 'series', True),
('jsoc', 'info', True),
('jsoc', 'query', True),
('jsoc', 'email', True),
('jsoc', 'export', True),
('kis', 'series', True),
('kis', 'info', True),
('kis', 'query', True),
('kis', 'email', False),
('kis', 'export', False),
])
def test_supported(server_name, operation, expected):
cfg = _server_configs[server_name]
assert cfg.check_supported(operation) == expected
@pytest.mark.parametrize('server_name, operation', [
('jsoc', 'bar'),
('kis', 'foo'),
])
def test_supported_invalid_operation(server_name, operation):
cfg = _server_configs[server_name]
with pytest.raises(ValueError):
cfg.check_supported(operation)
| {
"repo_name": "kbg/drms",
"path": "drms/tests/test_config.py",
"copies": "1",
"size": "4044",
"license": "mit",
"hash": 7877977519697323000,
"line_mean": 29.1791044776,
"line_max": 71,
"alpha_frac": 0.668892186,
"autogenerated": false,
"ratio": 3.486206896551724,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4655099082551724,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import pytest
import sys
def pytest_addoption(parser):
group = parser.getgroup("debugconfig")
group.addoption('--setuponly', '--setup-only', action="store_true",
help="only setup fixtures, do not execute tests.")
group.addoption('--setupshow', '--setup-show', action="store_true",
help="show setup of fixtures while executing tests.")
@pytest.hookimpl(hookwrapper=True)
def pytest_fixture_setup(fixturedef, request):
yield
config = request.config
if config.option.setupshow:
if hasattr(request, 'param'):
# Save the fixture parameter so ._show_fixture_action() can
# display it now and during the teardown (in .finish()).
if fixturedef.ids:
if callable(fixturedef.ids):
fixturedef.cached_param = fixturedef.ids(request.param)
else:
fixturedef.cached_param = fixturedef.ids[
request.param_index]
else:
fixturedef.cached_param = request.param
_show_fixture_action(fixturedef, 'SETUP')
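# Note: because pytest_fixture_setup above is a hookwrapper, everything after
# the bare `yield` runs once the fixture has actually been set up, so the SETUP
# line is printed only after setup finished and cached_param has been recorded
# for reuse at teardown.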
def pytest_fixture_post_finalizer(fixturedef):
if hasattr(fixturedef, "cached_result"):
config = fixturedef._fixturemanager.config
if config.option.setupshow:
_show_fixture_action(fixturedef, 'TEARDOWN')
if hasattr(fixturedef, "cached_param"):
del fixturedef.cached_param
def _show_fixture_action(fixturedef, msg):
config = fixturedef._fixturemanager.config
capman = config.pluginmanager.getplugin('capturemanager')
if capman:
out, err = capman.suspend_global_capture()
tw = config.get_terminal_writer()
tw.line()
tw.write(' ' * 2 * fixturedef.scopenum)
tw.write('{step} {scope} {fixture}'.format(
step=msg.ljust(8), # align the output to TEARDOWN
scope=fixturedef.scope[0].upper(),
fixture=fixturedef.argname))
if msg == 'SETUP':
deps = sorted(arg for arg in fixturedef.argnames if arg != 'request')
if deps:
tw.write(' (fixtures used: {0})'.format(', '.join(deps)))
if hasattr(fixturedef, 'cached_param'):
tw.write('[{0}]'.format(fixturedef.cached_param))
if capman:
capman.resume_global_capture()
sys.stdout.write(out)
sys.stderr.write(err)
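# --setuponly implies --setupshow: the tryfirst hook below turns setupshow on so
# the SETUP/TEARDOWN lines above are still emitted even though no tests execute.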
@pytest.hookimpl(tryfirst=True)
def pytest_cmdline_main(config):
if config.option.setuponly:
config.option.setupshow = True
| {
"repo_name": "Varentsov/servo",
"path": "tests/wpt/web-platform-tests/tools/third_party/pytest/_pytest/setuponly.py",
"copies": "16",
"size": "2576",
"license": "mpl-2.0",
"hash": 8185484454531317000,
"line_mean": 33.8108108108,
"line_max": 77,
"alpha_frac": 0.6246118012,
"autogenerated": false,
"ratio": 3.956989247311828,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 74
} |
from __future__ import absolute_import, division, print_function
import pytest
import sys
pytest.importorskip('dynd')
from odo import create, convert, discover
from dynd import nd
import numpy as np
import datashape
@pytest.fixture
def x():
ds = datashape.dshape('3 * int32')
return convert(nd.array, [1, 2, 3], dshape=ds)
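# The fixture above goes through odo's convert() rather than calling dynd
# directly, so the tests below exercise the conversion graph. A rough
# hand-written equivalent (illustrative only, not used by the tests) would be:
#
#     arr = nd.array([1, 2, 3], type='3 * int32')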
def test_create():
ds = datashape.dshape('5 * int32')
d = create(nd.array, dshape=ds)
assert discover(d) == ds
def test_simple_convert(x):
assert isinstance(x, nd.array)
assert convert(list, x) == [1, 2, 3]
@pytest.mark.parametrize(['typ', 'expected'],
[(list, [1, 2, 3]),
(tuple, (1, 2, 3)),
(np.ndarray, np.array([1, 2, 3]))])
def test_convert_different_types(x, typ, expected):
y = convert(typ, x)
assert isinstance(y, typ)
assert isinstance(convert(nd.array, y), nd.array)
assert all(lhs == rhs for lhs, rhs in zip(y, expected))
def test_convert_struct():
x = nd.array([('a', 1)], type='1 * {a: string, b: int32}')
assert convert(list, x) == [{'a': 'a', 'b': 1}]
| {
"repo_name": "cpcloud/odo",
"path": "odo/backends/tests/test_dynd.py",
"copies": "2",
"size": "1131",
"license": "bsd-3-clause",
"hash": -1305525275589500000,
"line_mean": 25.3023255814,
"line_max": 64,
"alpha_frac": 0.5923961096,
"autogenerated": false,
"ratio": 3.15041782729805,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.474281393689805,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import pytest
from characteristic import (
attributes,
with_cmp,
with_init,
with_repr,
)
@with_cmp(["a", "b"])
class CmpC(object):
def __init__(self, a, b):
self.a = a
self.b = b
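# with_cmp(["a", "b"]) adds the rich comparison methods and __hash__ to CmpC,
# comparing instances as the tuple (self.a, self.b); the tests below rely on
# exactly that tuple ordering.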
class TestWithCmp(object):
def test_equal(self):
"""
Equal objects are detected as equal.
"""
assert CmpC(1, 2) == CmpC(1, 2)
assert not (CmpC(1, 2) != CmpC(1, 2))
def test_unequal_same_class(self):
"""
Unequal objects of correct type are detected as unequal.
"""
assert CmpC(1, 2) != CmpC(2, 1)
assert not (CmpC(1, 2) == CmpC(2, 1))
def test_unequal_different_class(self):
"""
        Unequal objects of different type are detected even if their attributes
match.
"""
class NotCmpC(object):
a = 1
b = 2
assert CmpC(1, 2) != NotCmpC()
assert not (CmpC(1, 2) == NotCmpC())
def test_lt(self):
"""
__lt__ compares objects as tuples of attribute values.
"""
for a, b in [
((1, 2), (2, 1)),
((1, 2), (1, 3)),
(("a", "b"), ("b", "a")),
]:
assert CmpC(*a) < CmpC(*b)
def test_lt_unordable(self):
"""
__lt__ returns NotImplemented if classes differ.
"""
assert NotImplemented == (CmpC(1, 2).__lt__(42))
def test_le(self):
"""
__le__ compares objects as tuples of attribute values.
"""
for a, b in [
((1, 2), (2, 1)),
((1, 2), (1, 3)),
((1, 1), (1, 1)),
(("a", "b"), ("b", "a")),
(("a", "b"), ("a", "b")),
]:
assert CmpC(*a) <= CmpC(*b)
def test_le_unordable(self):
"""
__le__ returns NotImplemented if classes differ.
"""
assert NotImplemented == (CmpC(1, 2).__le__(42))
def test_gt(self):
"""
__gt__ compares objects as tuples of attribute values.
"""
for a, b in [
((2, 1), (1, 2)),
((1, 3), (1, 2)),
(("b", "a"), ("a", "b")),
]:
assert CmpC(*a) > CmpC(*b)
def test_gt_unordable(self):
"""
__gt__ returns NotImplemented if classes differ.
"""
assert NotImplemented == (CmpC(1, 2).__gt__(42))
def test_ge(self):
"""
__ge__ compares objects as tuples of attribute values.
"""
for a, b in [
((2, 1), (1, 2)),
((1, 3), (1, 2)),
((1, 1), (1, 1)),
(("b", "a"), ("a", "b")),
(("a", "b"), ("a", "b")),
]:
assert CmpC(*a) >= CmpC(*b)
def test_ge_unordable(self):
"""
__ge__ returns NotImplemented if classes differ.
"""
assert NotImplemented == (CmpC(1, 2).__ge__(42))
def test_hash(self):
"""
__hash__ returns different hashes for different values.
"""
assert hash(CmpC(1, 2)) != hash(CmpC(1, 1))
@with_repr(["a", "b"])
class ReprC(object):
def __init__(self, a, b):
self.a = a
self.b = b
class TestReprAttrs(object):
def test_repr(self):
"""
Test repr returns a sensible value.
"""
assert "<ReprC(a=1, b=2)>" == repr(ReprC(1, 2))
@with_init(["a", "b"])
class InitC(object):
def __init__(self):
if self.a == self.b:
raise ValueError
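# with_init(["a", "b"]) generates an initializer that pops `a` and `b` from the
# keyword arguments, sets them as attributes, and only then calls the original
# __init__, which is why InitC.__init__ above can already compare self.a and
# self.b.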
class TestWithInit(object):
def test_sets_attributes(self):
"""
The attributes are initialized using the passed keywords.
"""
obj = InitC(a=1, b=2)
assert 1 == obj.a
assert 2 == obj.b
def test_custom_init(self):
"""
The class initializer is called too.
"""
with pytest.raises(ValueError):
InitC(a=1, b=1)
def test_passes_args(self):
"""
All positional parameters are passed to the original initializer.
"""
@with_init(["a"])
class InitWithArg(object):
def __init__(self, arg):
self.arg = arg
obj = InitWithArg(42, a=1)
assert 42 == obj.arg
assert 1 == obj.a
def test_passes_remaining_kw(self):
"""
Keyword arguments that aren't used for attributes are passed to the
original initializer.
"""
@with_init(["a"])
class InitWithKWArg(object):
def __init__(self, kw_arg=None):
self.kw_arg = kw_arg
obj = InitWithKWArg(a=1, kw_arg=42)
assert 42 == obj.kw_arg
assert 1 == obj.a
def test_does_not_pass_attrs(self):
"""
The attributes are removed from the keyword arguments before they are
passed to the original initializer.
"""
@with_init(["a"])
class InitWithKWArgs(object):
def __init__(self, **kw):
assert "a" not in kw
assert "b" in kw
InitWithKWArgs(a=1, b=42)
def test_defaults(self):
"""
If defaults are passed, they are used as fallback.
"""
@with_init(["a", "b"], defaults={"b": 2})
class InitWithDefaults(object):
pass
obj = InitWithDefaults(a=1)
assert 2 == obj.b
def test_missing_arg(self):
"""
Raises `ValueError` if a value isn't passed.
"""
with pytest.raises(ValueError):
InitC(a=1)
@attributes(["a", "b"], create_init=True)
class MagicWithInitC(object):
pass
@attributes(["a", "b"], create_init=False)
class MagicWithoutInitC(object):
pass
class TestAttributes(object):
def test_leaves_init_alone(self):
"""
If *create_init* is `False`, leave __init__ alone.
"""
obj = MagicWithoutInitC()
with pytest.raises(AttributeError):
obj.a
with pytest.raises(AttributeError):
obj.b
def test_wraps_init(self):
"""
If *create_init* is `True`, build initializer.
"""
obj = MagicWithInitC(a=1, b=2)
assert 1 == obj.a
assert 2 == obj.b
| {
"repo_name": "alex/characteristic",
"path": "test_characteristic.py",
"copies": "1",
"size": "6275",
"license": "mit",
"hash": -1454932141676181000,
"line_mean": 24.4048582996,
"line_max": 78,
"alpha_frac": 0.4801593625,
"autogenerated": false,
"ratio": 3.6271676300578033,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4607326992557803,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import pytest
from contextlib import contextmanager
from odo.utils import tmpfile
from odo.chunks import chunks
from odo import into, append, convert, resource, discover, odo
import datashape
import pandas as pd
from datetime import datetime
import numpy as np
pytest.importorskip('tables')
df = pd.DataFrame([['a', 1, 10., datetime(2000, 1, 1)],
['ab', 2, 20., datetime(2000, 2, 2)],
['abc', 3, 30., datetime(2000, 3, 3)],
['abcd', 4, 40., datetime(2000, 4, 4)]],
columns=['name', 'a', 'b', 'time'])
@contextmanager
def file(df):
with tmpfile('.hdf5') as fn:
f = pd.HDFStore(fn)
f.put('/data', df, format='table', append=True)
try:
yield fn, f, f.get_storer('/data')
finally:
f.close()
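# file() yields (filename, open HDFStore, storer for '/data') so the tests below
# can target the PyTables storer object directly instead of reopening the store.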
def test_discover():
with file(df) as (fn, f, dset):
assert str(discover(dset)) == str(discover(df))
assert str(discover(f)) == str(discover({'data': df}))
def test_discover_nested():
with tmpfile('hdf5') as fn:
df.to_hdf(fn, '/a/b/data')
df.to_hdf(fn, '/a/b/data2')
df.to_hdf(fn, '/a/data')
hdf = pd.HDFStore(fn)
try:
assert discover(hdf) == discover(
{'a': {'b': {'data': df, 'data2': df}, 'data': df}}
)
finally:
hdf.close()
def eq(a, b):
if isinstance(a, pd.DataFrame):
a = into(np.ndarray, a)
if isinstance(b, pd.DataFrame):
b = into(np.ndarray, b)
c = a == b
if isinstance(c, np.ndarray):
c = c.all()
return c
def test_chunks():
with file(df) as (fn, f, dset):
c = convert(chunks(pd.DataFrame), dset)
assert eq(convert(np.ndarray, c), df)
def test_resource_no_info():
with tmpfile('.hdf5') as fn:
r = resource('hdfstore://' + fn)
try:
assert isinstance(r, pd.HDFStore)
finally:
r.close()
def test_resource_of_dataset():
with tmpfile('.hdf5') as fn:
ds = datashape.dshape('{x: int32, y: 3 * int32}')
r = resource('hdfstore://'+fn+'::/x', dshape=ds)
try:
assert r
finally:
r.parent.close()
def test_append():
with file(df) as (fn, f, dset):
append(dset, df)
append(dset, df)
assert discover(dset).shape == (len(df) * 3,)
def test_into_resource():
with tmpfile('.hdf5') as fn:
d = into('hdfstore://' + fn + '::/x', df)
try:
assert discover(d) == discover(df)
assert eq(into(pd.DataFrame, d), df)
finally:
d.parent.close()
def test_convert_pandas():
with file(df) as (fn, f, dset):
assert eq(convert(pd.DataFrame, dset), df)
def test_convert_chunks():
with file(df) as (fn, f, dset):
c = convert(chunks(pd.DataFrame), dset, chunksize=len(df) / 2)
assert len(list(c)) == 2
assert eq(convert(pd.DataFrame, c), df)
def test_append_chunks():
with file(df) as (fn, f, dset):
append(dset, chunks(pd.DataFrame)([df, df]))
assert discover(dset).shape[0] == len(df) * 3
def test_append_other():
with tmpfile('.hdf5') as fn:
x = into(np.ndarray, df)
dset = into('hdfstore://'+fn+'::/data', x)
try:
assert discover(dset) == discover(df)
finally:
dset.parent.close()
def test_fixed_shape():
with tmpfile('.hdf5') as fn:
df.to_hdf(fn, 'foo')
r = resource('hdfstore://'+fn+'::/foo')
try:
assert isinstance(r.shape, list)
assert discover(r).shape == (len(df),)
finally:
r.parent.close()
def test_fixed_convert():
with tmpfile('.hdf5') as fn:
df.to_hdf(fn, 'foo')
r = resource('hdfstore://'+fn+'::/foo')
try:
assert eq(convert(pd.DataFrame, r), df)
finally:
r.parent.close()
def test_append_vs_write():
import pandas.util.testing as tm
with tmpfile('.hdf5') as fn:
df.to_hdf(fn, 'foo', append=True)
store = odo(df, 'hdfstore://%s::foo' % fn)
try:
newdf = odo(store, pd.DataFrame)
finally:
store.parent.close()
tm.assert_frame_equal(newdf, pd.concat([df, df]))
with tmpfile('.hdf5') as fn:
store = odo(df, 'hdfstore://%s::foo' % fn, mode='w')
try:
newdf = odo(store, pd.DataFrame)
finally:
store.parent.close()
tm.assert_frame_equal(newdf, df)
| {
"repo_name": "cpcloud/odo",
"path": "odo/backends/tests/test_hdfstore.py",
"copies": "5",
"size": "4628",
"license": "bsd-3-clause",
"hash": 9115117688643503000,
"line_mean": 24.7111111111,
"line_max": 70,
"alpha_frac": 0.5313310285,
"autogenerated": false,
"ratio": 3.3199426111908177,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6351273639690818,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import pytest
from datashape import discover, dshape
from blaze.compute.core import (compute_up, compute, bottom_up_until_type_break,
top_then_bottom_then_top_again_etc,
swap_resources_into_scope)
from blaze.expr import by, symbol, Expr, Symbol
from blaze.dispatch import dispatch
from blaze.compatibility import raises
from blaze.utils import example
import pandas as pd
import numpy as np
def test_errors():
t = symbol('t', 'var * {foo: int}')
with raises(NotImplementedError):
compute_up(by(t, count=t.count()), 1)
def test_optimize():
class Foo(object):
pass
s = symbol('s', '5 * {x: int, y: int}')
@dispatch(Expr, Foo)
def compute_down(expr, foo):
return str(expr)
assert compute(s.x * 2, Foo()) == "s.x * 2"
@dispatch(Expr, Foo)
def optimize(expr, foo):
return expr + 1
assert compute(s.x * 2, Foo()) == "(s.x * 2) + 1"
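# The test above shows that once an optimize() implementation is registered for
# (Expr, Foo), compute() applies it before dispatching to compute_down, which is
# why the rendered string gains the "+ 1".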
def test_bottom_up_until_type_break():
s = symbol('s', 'var * {name: string, amount: int}')
data = np.array([('Alice', 100), ('Bob', 200), ('Charlie', 300)],
dtype=[('name', 'S7'), ('amount', 'i4')])
e = (s.amount + 1).distinct()
expr, scope = bottom_up_until_type_break(e, {s: data})
amount = symbol('amount', 'var * int64', token=1)
assert expr.isidentical(amount)
assert len(scope) == 1
assert amount in scope
assert (scope[amount] == np.array([101, 201, 301], dtype='i4')).all()
# This computation has a type change midstream, so we stop and get the
# unfinished computation.
e = s.amount.sum() + 1
expr, scope = bottom_up_until_type_break(e, {s: data})
amount_sum = symbol('amount_sum', 'int64')
assert expr.isidentical(amount_sum + 1)
assert len(scope) == 1
assert amount_sum in scope
assert scope[amount_sum] == 600
# ensure that we work on binops with one child
x = symbol('x', 'real')
expr, scope = bottom_up_until_type_break(x + x, {x: 1})
assert len(scope) == 1
x2 = list(scope.keys())[0]
assert isinstance(x2, Symbol)
assert isinstance(expr, Symbol)
assert scope[x2] == 2
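# bottom_up_until_type_break returns the partially rewritten expression plus a
# scope mapping the new leaf symbol to already computed data; the walk stops as
# soon as a subexpression changes datashape (e.g. a reduction), as the
# amount_sum case above demonstrates.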
def test_top_then_bottom_then_top_again_etc():
s = symbol('s', 'var * {name: string, amount: int32}')
data = np.array([('Alice', 100), ('Bob', 200), ('Charlie', 300)],
dtype=[('name', 'S7'), ('amount', 'i4')])
e = s.amount.sum() + 1
assert top_then_bottom_then_top_again_etc(e, {s: data}) == 601
def test_swap_resources_into_scope():
from blaze import Data
t = Data([1, 2, 3], dshape='3 * int', name='t')
expr, scope = swap_resources_into_scope(t.head(2), {t: t.data})
assert t._resources()
assert not expr._resources()
assert t not in scope
def test_compute_up_on_dict():
d = {'a': [1, 2, 3], 'b': [4, 5, 6]}
assert str(discover(d)) == str(dshape('{a: 3 * int64, b: 3 * int64}'))
s = symbol('s', discover(d))
assert compute(s.a, {s: d}) == [1, 2, 3]
def test_pre_compute_on_multiple_datasets_is_selective():
from odo import CSV
from blaze import Data
from blaze.cached import CachedDataset
df = pd.DataFrame([[1, 'Alice', 100],
[2, 'Bob', -200],
[3, 'Charlie', 300],
[4, 'Denis', 400],
[5, 'Edith', -500]], columns=['id', 'name', 'amount'])
iris = CSV(example('iris.csv'))
dset = CachedDataset({'df': df, 'iris': iris})
d = Data(dset)
assert str(compute(d.df.amount)) == str(df.amount)
def test_raises_on_valid_expression_but_no_implementation():
class MyExpr(Expr):
__slots__ = '_hash', '_child'
@property
def dshape(self):
return self._child.dshape
t = symbol('t', 'var * {amount: real}')
expr = MyExpr(t.amount)
df = [(1.0,), (2.0,), (3.0,)]
with pytest.raises(NotImplementedError):
compute(expr, df)
| {
"repo_name": "LiaoPan/blaze",
"path": "blaze/compute/tests/test_core_compute.py",
"copies": "6",
"size": "4061",
"license": "bsd-3-clause",
"hash": -2039962174858273300,
"line_mean": 28.2158273381,
"line_max": 80,
"alpha_frac": 0.5717803497,
"autogenerated": false,
"ratio": 3.2358565737051794,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00033403229107535225,
"num_lines": 139
} |
from __future__ import absolute_import, division, print_function
import pytest
from datetime import datetime
import pandas as pd
import pandas.util.testing as tm
import numpy as np
from pandas import DataFrame, Series
from blaze.compute.core import compute
from blaze import dshape, discover, transform
from blaze.expr import symbol, join, by, summary, Distinct, shape
from blaze.expr import (merge, exp, mean, count, nunique, sum, min, max, any,
all, var, std)
from blaze.compatibility import builtins, xfail
t = symbol('t', 'var * {name: string, amount: int, id: int}')
df = DataFrame([['Alice', 100, 1],
['Bob', 200, 2],
['Alice', 50, 3]], columns=['name', 'amount', 'id'])
tbig = symbol('tbig',
'var * {name: string, sex: string[1], amount: int, id: int}')
dfbig = DataFrame([['Alice', 'F', 100, 1],
['Alice', 'F', 100, 3],
['Drew', 'F', 100, 4],
['Drew', 'M', 100, 5],
['Drew', 'M', 200, 5]],
columns=['name', 'sex', 'amount', 'id'])
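# t/df and tbig/dfbig pair a blaze symbol with the pandas data it describes;
# most of the tests below call compute(expr, data) with one of these pairs, or
# pass an explicit {symbol: data} scope when more than one table is involved.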
def test_series_columnwise():
s = Series([1, 2, 3], name='a')
t = symbol('t', 'var * {a: int64}')
result = compute(t.a + 1, s)
tm.assert_series_equal(s + 1, result)
def test_symbol():
tm.assert_frame_equal(compute(t, df), df)
def test_projection():
tm.assert_frame_equal(compute(t[['name', 'id']], df),
df[['name', 'id']])
def test_eq():
tm.assert_series_equal(compute(t['amount'] == 100, df),
df['amount'] == 100)
def test_selection():
tm.assert_frame_equal(compute(t[t['amount'] == 0], df),
df[df['amount'] == 0])
tm.assert_frame_equal(compute(t[t['amount'] > 150], df),
df[df['amount'] > 150])
def test_arithmetic():
tm.assert_series_equal(compute(t['amount'] + t['id'], df),
df.amount + df.id)
tm.assert_series_equal(compute(t['amount'] * t['id'], df),
df.amount * df.id)
tm.assert_series_equal(compute(t['amount'] % t['id'], df),
df.amount % df.id)
def test_join():
left = DataFrame(
[['Alice', 100], ['Bob', 200]], columns=['name', 'amount'])
right = DataFrame([['Alice', 1], ['Bob', 2]], columns=['name', 'id'])
lsym = symbol('L', 'var * {name: string, amount: int}')
rsym = symbol('R', 'var * {name: string, id: int}')
joined = join(lsym, rsym, 'name')
assert (dshape(joined.schema) ==
dshape('{name: string, amount: int, id: int}'))
result = compute(joined, {lsym: left, rsym: right})
expected = DataFrame([['Alice', 100, 1], ['Bob', 200, 2]],
columns=['name', 'amount', 'id'])
tm.assert_frame_equal(result, expected)
assert list(result.columns) == list(joined.fields)
def test_multi_column_join():
left = [(1, 2, 3),
(2, 3, 4),
(1, 3, 5)]
left = DataFrame(left, columns=['x', 'y', 'z'])
right = [(1, 2, 30),
(1, 3, 50),
(1, 3, 150)]
right = DataFrame(right, columns=['x', 'y', 'w'])
lsym = symbol('lsym', 'var * {x: int, y: int, z: int}')
rsym = symbol('rsym', 'var * {x: int, y: int, w: int}')
j = join(lsym, rsym, ['x', 'y'])
expected = [(1, 2, 3, 30),
(1, 3, 5, 50),
(1, 3, 5, 150)]
expected = DataFrame(expected, columns=['x', 'y', 'z', 'w'])
result = compute(j, {lsym: left, rsym: right})
print(result)
tm.assert_frame_equal(result, expected)
assert list(result.columns) == list(j.fields)
def test_unary_op():
assert (compute(exp(t['amount']), df) == np.exp(df['amount'])).all()
def test_abs():
assert (compute(abs(t['amount']), df) == abs(df['amount'])).all()
def test_neg():
tm.assert_series_equal(compute(-t['amount'], df),
-df['amount'])
@xfail(reason='Projection does not support arithmetic')
def test_neg_projection():
tm.assert_series_equal(compute(-t[['amount', 'id']], df),
-df[['amount', 'id']])
def test_columns_series():
assert isinstance(compute(t['amount'], df), Series)
assert isinstance(compute(t['amount'] > 150, df), Series)
def test_reductions():
assert compute(mean(t['amount']), df) == 350 / 3
assert compute(count(t['amount']), df) == 3
assert compute(sum(t['amount']), df) == 100 + 200 + 50
assert compute(min(t['amount']), df) == 50
assert compute(max(t['amount']), df) == 200
assert compute(nunique(t['amount']), df) == 3
assert compute(nunique(t['name']), df) == 2
assert compute(any(t['amount'] > 150), df) is True
assert compute(any(t['amount'] > 250), df) is False
assert compute(var(t['amount']), df) == df.amount.var(ddof=0)
assert compute(var(t['amount'], unbiased=True), df) == df.amount.var()
assert compute(std(t['amount']), df) == df.amount.std(ddof=0)
assert compute(std(t['amount'], unbiased=True), df) == df.amount.std()
assert compute(t.amount[0], df) == df.amount.iloc[0]
assert compute(t.amount[-1], df) == df.amount.iloc[-1]
def test_reductions_on_dataframes():
assert compute(count(t), df) == 3
assert shape(compute(count(t, keepdims=True), df)) == (1,)
def test_1d_reductions_keepdims():
series = df['amount']
for r in [sum, min, max, nunique, count, std, var]:
result = compute(r(t.amount, keepdims=True), {t.amount: series})
assert type(result) == type(series)
def test_distinct():
dftoobig = DataFrame([['Alice', 'F', 100, 1],
['Alice', 'F', 100, 1],
['Alice', 'F', 100, 3],
['Drew', 'F', 100, 4],
['Drew', 'M', 100, 5],
['Drew', 'F', 100, 4],
['Drew', 'M', 100, 5],
['Drew', 'M', 200, 5],
['Drew', 'M', 200, 5]],
columns=['name', 'sex', 'amount', 'id'])
d_t = Distinct(tbig)
d_df = compute(d_t, dftoobig)
tm.assert_frame_equal(d_df, dfbig)
# Test idempotence
tm.assert_frame_equal(compute(d_t, d_df), d_df)
def test_by_one():
result = compute(by(t['name'], total=t['amount'].sum()), df)
expected = df.groupby('name')['amount'].sum().reset_index()
expected.columns = ['name', 'total']
tm.assert_frame_equal(result, expected)
def test_by_two():
result = compute(by(tbig[['name', 'sex']],
total=sum(tbig['amount'])), dfbig)
expected = DataFrame([['Alice', 'F', 200],
['Drew', 'F', 100],
['Drew', 'M', 300]],
columns=['name', 'sex', 'total'])
tm.assert_frame_equal(result, expected)
def test_by_three():
expr = by(tbig[['name', 'sex']],
total=(tbig['id'] + tbig['amount']).sum())
result = compute(expr, dfbig)
expected = DataFrame([['Alice', 'F', 204],
['Drew', 'F', 104],
['Drew', 'M', 310]], columns=['name', 'sex', 'total'])
expected.columns = expr.fields
tm.assert_frame_equal(result, expected)
def test_by_four():
t = tbig[['sex', 'amount']]
expr = by(t['sex'], max=t['amount'].max())
result = compute(expr, dfbig)
expected = DataFrame([['F', 100],
['M', 200]], columns=['sex', 'max'])
tm.assert_frame_equal(result, expected)
def test_join_by_arcs():
df_idx = DataFrame([['A', 1],
['B', 2],
['C', 3]],
columns=['name', 'node_id'])
df_arc = DataFrame([[1, 3],
[2, 3],
[3, 1]],
columns=['node_out', 'node_id'])
t_idx = symbol('t_idx', 'var * {name: string, node_id: int32}')
t_arc = symbol('t_arc', 'var * {node_out: int32, node_id: int32}')
joined = join(t_arc, t_idx, "node_id")
want = by(joined['name'], count=joined['node_id'].count())
result = compute(want, {t_arc: df_arc, t_idx: df_idx})
result_pandas = pd.merge(df_arc, df_idx, on='node_id')
gb = result_pandas.groupby('name')
expected = gb.node_id.count().reset_index().rename(columns={
'node_id': 'count'
})
tm.assert_frame_equal(result, expected)
assert list(result.columns) == ['name', 'count']
def test_sort():
tm.assert_frame_equal(compute(t.sort('amount'), df),
df.sort('amount'))
tm.assert_frame_equal(compute(t.sort('amount', ascending=True), df),
df.sort('amount', ascending=True))
tm.assert_frame_equal(compute(t.sort(['amount', 'id']), df),
df.sort(['amount', 'id']))
def test_sort_on_series_no_warning(recwarn):
expected = df.amount.order()
recwarn.clear()
tm.assert_series_equal(compute(t['amount'].sort('amount'), df),
expected)
    # recwarn.pop raises AssertionError if no warning was recorded; same below
with pytest.raises(AssertionError):
assert recwarn.pop(FutureWarning)
tm.assert_series_equal(compute(t['amount'].sort(), df),
expected)
with pytest.raises(AssertionError):
assert recwarn.pop(FutureWarning)
def test_field_on_series():
expr = symbol('s', 'var * int')
data = Series([1, 2, 3, 4], name='s')
tm.assert_series_equal(compute(expr.s, data), data)
def test_head():
tm.assert_frame_equal(compute(t.head(1), df), df.head(1))
def test_label():
expected = df['amount'] * 10
expected.name = 'foo'
tm.assert_series_equal(compute((t['amount'] * 10).label('foo'), df),
expected)
def test_relabel():
result = compute(t.relabel({'name': 'NAME', 'id': 'ID'}), df)
expected = df.rename(columns={'name': 'NAME', 'id': 'ID'})
tm.assert_frame_equal(result, expected)
def test_relabel_series():
result = compute(t.relabel({'name': 'NAME'}), df.name)
assert result.name == 'NAME'
ts = pd.date_range('now', periods=10).to_series().reset_index(drop=True)
tframe = DataFrame({'timestamp': ts})
def test_map_column():
inc = lambda x: x + 1
result = compute(t['amount'].map(inc, 'int'), df)
expected = df['amount'] + 1
tm.assert_series_equal(result, expected)
def test_map():
f = lambda _, amt, id: amt + id
result = compute(t.map(f, 'real'), df)
expected = df['amount'] + df['id']
tm.assert_series_equal(result, expected)
def test_apply_column():
result = compute(t.amount.apply(np.sum, 'real'), df)
expected = np.sum(df['amount'])
assert result == expected
result = compute(t.amount.apply(builtins.sum, 'real'), df)
expected = builtins.sum(df['amount'])
assert result == expected
def test_apply():
result = compute(t.apply(str, 'string'), df)
expected = str(df)
assert result == expected
def test_merge():
col = (t['amount'] * 2).label('new')
expr = merge(t['name'], col)
expected = DataFrame([['Alice', 200],
['Bob', 400],
['Alice', 100]],
columns=['name', 'new'])
result = compute(expr, df)
tm.assert_frame_equal(result, expected)
def test_by_nunique():
result = compute(by(t['name'], count=t['id'].nunique()), df)
expected = DataFrame([['Alice', 2], ['Bob', 1]],
columns=['name', 'count'])
tm.assert_frame_equal(result, expected)
def test_selection_out_of_order():
expr = t['name'][t['amount'] < 100]
expected = df.loc[df.amount < 100, 'name']
result = compute(expr, df)
tm.assert_series_equal(result, expected)
def test_outer_join():
left = [(1, 'Alice', 100),
(2, 'Bob', 200),
(4, 'Dennis', 400)]
left = DataFrame(left, columns=['id', 'name', 'amount'])
right = [('NYC', 1),
('Boston', 1),
('LA', 3),
('Moscow', 4)]
right = DataFrame(right, columns=['city', 'id'])
lsym = symbol('lsym', 'var * {id: int, name: string, amount: real}')
rsym = symbol('rsym', 'var * {city: string, id: int}')
convert = lambda df: set(df.to_records(index=False).tolist())
assert (convert(compute(join(lsym, rsym), {lsym: left, rsym: right})) ==
set([(1, 'Alice', 100, 'NYC'),
(1, 'Alice', 100, 'Boston'),
(4, 'Dennis', 400, 'Moscow')]))
assert (convert(compute(join(lsym, rsym, how='left'),
{lsym: left, rsym: right})) ==
set([(1, 'Alice', 100, 'NYC'),
(1, 'Alice', 100, 'Boston'),
(2, 'Bob', 200, np.nan),
(4, 'Dennis', 400, 'Moscow')]))
df = compute(join(lsym, rsym, how='right'), {lsym: left, rsym: right})
expected = DataFrame([(1., 'Alice', 100., 'NYC'),
(1., 'Alice', 100., 'Boston'),
(3., np.nan, np.nan, 'lsymA'),
(4., 'Dennis', 400., 'Moscow')],
columns=['id', 'name', 'amount', 'city'])
result = df.sort('id').to_records(index=False)
expected = expected.sort('id').to_records(index=False)
np.array_equal(result, expected)
df = compute(join(lsym, rsym, how='outer'), {lsym: left, rsym: right})
expected = DataFrame([(1., 'Alice', 100., 'NYC'),
(1., 'Alice', 100., 'Boston'),
(2., 'Bob', 200., np.nan),
(3., np.nan, np.nan, 'LA'),
(4., 'Dennis', 400., 'Moscow')],
columns=['id', 'name', 'amount', 'city'])
result = df.sort('id').to_records(index=False)
expected = expected.sort('id').to_records(index=False)
np.array_equal(result, expected)
def test_by_on_same_column():
df = pd.DataFrame([[1, 2], [1, 4], [2, 9]], columns=['id', 'value'])
t = symbol('data', 'var * {id: int, value: int}')
gby = by(t['id'], count=t['id'].count())
expected = DataFrame([[1, 2], [2, 1]], columns=['id', 'count'])
result = compute(gby, {t: df})
tm.assert_frame_equal(result, expected)
def test_summary_by():
expr = by(t.name, summary(count=t.id.count(), sum=t.amount.sum()))
result = compute(expr, df)
expected = DataFrame([['Alice', 2, 150],
['Bob', 1, 200]], columns=['name', 'count', 'sum'])
expr = by(t.name, summary(count=t.id.count(), sum=(t.amount + 1).sum()))
result = compute(expr, df)
expected = DataFrame([['Alice', 2, 152],
['Bob', 1, 201]], columns=['name', 'count', 'sum'])
tm.assert_frame_equal(result, expected)
@pytest.mark.xfail(raises=TypeError,
reason=('pandas backend cannot support non Reduction '
'subclasses'))
def test_summary_by_first():
expr = by(t.name, fst=t.amount[0])
result = compute(expr, df)
assert result == df.amount.iloc[0]
def test_summary_by_reduction_arithmetic():
expr = by(t.name, summary(count=t.id.count(), sum=t.amount.sum() + 1))
result = compute(expr, df)
expected = DataFrame([['Alice', 2, 151],
['Bob', 1, 201]], columns=['name', 'count', 'sum'])
tm.assert_frame_equal(result, expected)
def test_summary():
expr = summary(count=t.id.count(), sum=t.amount.sum())
tm.assert_series_equal(compute(expr, df), Series({'count': 3, 'sum': 350}))
def test_summary_on_series():
ser = Series([1, 2, 3])
s = symbol('s', '3 * int')
expr = summary(max=s.max(), min=s.min())
assert compute(expr, ser) == (3, 1)
expr = summary(max=s.max(), min=s.min(), keepdims=True)
assert compute(expr, ser) == [(3, 1)]
def test_summary_keepdims():
expr = summary(count=t.id.count(), sum=t.amount.sum(), keepdims=True)
expected = DataFrame([[3, 350]], columns=['count', 'sum'])
tm.assert_frame_equal(compute(expr, df), expected)
def test_dplyr_transform():
df = DataFrame({'timestamp': pd.date_range('now', periods=5)})
t = symbol('t', discover(df))
expr = transform(t, date=t.timestamp.map(lambda x: x.date(),
schema='datetime'))
lhs = compute(expr, df)
rhs = pd.concat([df, Series(df.timestamp.map(lambda x: x.date()),
name='date').to_frame()], axis=1)
tm.assert_frame_equal(lhs, rhs)
def test_nested_transform():
d = {'timestamp': [1379613528, 1379620047], 'platform': ["Linux",
"Windows"]}
df = DataFrame(d)
t = symbol('t', discover(df))
t = transform(t, timestamp=t.timestamp.map(datetime.fromtimestamp,
schema='datetime'))
expr = transform(t, date=t.timestamp.map(lambda x: x.date(),
schema='datetime'))
result = compute(expr, df)
df['timestamp'] = df.timestamp.map(datetime.fromtimestamp)
df['date'] = df.timestamp.map(lambda x: x.date())
tm.assert_frame_equal(result, df)
def test_like():
expr = t.like(name='Alice*')
expected = DataFrame([['Alice', 100, 1],
['Alice', 50, 3]],
columns=['name', 'amount', 'id'])
result = compute(expr, df).reset_index(drop=True)
tm.assert_frame_equal(result, expected)
def test_strlen():
expr = t.name.strlen()
expected = pd.Series([5, 3, 5], name='name')
result = compute(expr, df).reset_index(drop=True)
tm.assert_series_equal(expected, result)
def test_rowwise_by():
f = lambda _, id, name: id + len(name)
expr = by(t.map(f, 'int'), total=t.amount.sum())
df = pd.DataFrame({'id': [1, 1, 2],
'name': ['alice', 'wendy', 'bob'],
'amount': [100, 200, 300.03]})
expected = pd.DataFrame([(5, 300.03), (6, 300)], columns=expr.fields)
result = compute(expr, df)
tm.assert_frame_equal(result, expected)
def test_datetime_access():
df = DataFrame({'name': ['Alice', 'Bob', 'Joe'],
'when': [datetime(2010, 1, 1, 1, 1, 1)] * 3,
'amount': [100, 200, 300],
'id': [1, 2, 3]})
t = symbol('t', discover(df))
for attr in ['day', 'month', 'minute', 'second']:
tm.assert_series_equal(compute(getattr(t.when, attr), df),
Series([1, 1, 1]))
def test_frame_slice():
tm.assert_series_equal(compute(t[0], df), df.iloc[0])
tm.assert_series_equal(compute(t[2], df), df.iloc[2])
tm.assert_frame_equal(compute(t[:2], df), df.iloc[:2])
tm.assert_frame_equal(compute(t[1:3], df), df.iloc[1:3])
tm.assert_frame_equal(compute(t[1::2], df), df.iloc[1::2])
tm.assert_frame_equal(compute(t[[2, 0]], df), df.iloc[[2, 0]])
def test_series_slice():
assert compute(t.amount[0], df) == df.amount.iloc[0]
assert compute(t.amount[2], df) == df.amount.iloc[2]
tm.assert_series_equal(compute(t.amount[:2], df), df.amount.iloc[:2])
tm.assert_series_equal(compute(t.amount[1:3], df), df.amount.iloc[1:3])
tm.assert_series_equal(compute(t.amount[1::2], df), df.amount.iloc[1::2])
def test_nelements():
assert compute(t.nelements(), df) == len(df)
assert compute(t.nrows, df) == len(df)
def test_datetime_truncation_minutes():
data = Series(['2000-01-01T12:10:00Z', '2000-06-25T12:35:12Z'],
dtype='M8[ns]')
s = symbol('s', 'var * datetime')
result = compute(s.truncate(20, 'minutes'), data)
expected = Series(['2000-01-01T12:00:00Z', '2000-06-25T12:20:00Z'],
dtype='M8[ns]')
tm.assert_series_equal(result, expected)
def test_datetime_truncation_nanoseconds():
data = Series(['2000-01-01T12:10:00.000000005',
'2000-01-01T12:10:00.000000025'],
dtype='M8[ns]')
s = symbol('s', 'var * datetime')
expected = Series(['2000-01-01T12:10:00.000000000',
'2000-01-01T12:10:00.000000020'],
dtype='M8[ns]')
result = compute(s.truncate(nanoseconds=20), data)
tm.assert_series_equal(result, expected)
def test_datetime_truncation_weeks():
data = Series(['2000-01-01T12:10:00Z', '2000-06-25T12:35:12Z'],
dtype='M8[ns]')
s = symbol('s', 'var * datetime')
result = compute(s.truncate(2, 'weeks'), data)
expected = Series(['1999-12-19', '2000-06-18'], dtype='M8[ns]')
tm.assert_series_equal(result, expected)
def test_datetime_truncation_days():
data = Series(['2000-01-01T12:10:00Z', '2000-06-25T12:35:12Z'],
dtype='M8[ns]')
s = symbol('s', 'var * datetime')
result = compute(s.truncate(days=3), data)
expected = Series(['1999-12-31', '2000-06-25'], dtype='M8[ns]')
tm.assert_series_equal(result, expected)
def test_datetime_truncation_same_as_python():
data = Series(['2000-01-01T12:10:00Z', '2000-06-25T12:35:12Z'],
dtype='M8[ns]')
s = symbol('s', 'var * datetime')
assert (compute(s.truncate(weeks=2), data[0].to_pydatetime()) ==
datetime(1999, 12, 26).date())
def test_complex_group_by():
expr = by(merge(tbig.amount // 10, tbig.id % 2),
count=tbig.name.count())
result = compute(expr, dfbig) # can we do this? yes we can!
expected = dfbig.groupby([dfbig.amount // 10,
dfbig.id % 2])['name'].count().reset_index()
expected = expected.rename(columns={'name': 'count'})
tm.assert_frame_equal(result, expected)
def test_by_with_complex_summary():
expr = by(t.name, total=t.amount.sum() + t.id.sum() - 1, a=t.id.min())
result = compute(expr, df)
assert list(result.columns) == expr.fields
assert list(result.total) == [150 + 4 - 1, 200 + 2 - 1]
| {
"repo_name": "mrocklin/blaze",
"path": "blaze/compute/tests/test_pandas_compute.py",
"copies": "1",
"size": "22170",
"license": "bsd-3-clause",
"hash": 6002215032312039000,
"line_mean": 32.7957317073,
"line_max": 80,
"alpha_frac": 0.5312584574,
"autogenerated": false,
"ratio": 3.2981255578696818,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.43293840152696816,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import pytest
from datetime import datetime, timedelta
import pandas as pd
import pandas.util.testing as tm
import numpy as np
from pandas import DataFrame, Series
from string import ascii_lowercase
from blaze.compute.core import compute
from blaze import dshape, discover, transform
from blaze.expr import symbol, join, by, summary, Distinct, shape
from blaze.expr import (merge, exp, mean, count, nunique, sum, min, max, any,
var, std, concat)
from blaze.compatibility import builtins, xfail, assert_series_equal
t = symbol('t', 'var * {name: string, amount: int, id: int}')
df = DataFrame([['Alice', 100, 1],
['Bob', 200, 2],
['Alice', 50, 3]], columns=['name', 'amount', 'id'])
tbig = symbol('tbig',
'var * {name: string, sex: string[1], amount: int, id: int}')
dfbig = DataFrame([['Alice', 'F', 100, 1],
['Alice', 'F', 100, 3],
['Drew', 'F', 100, 4],
['Drew', 'M', 100, 5],
['Drew', 'M', 200, 5]],
columns=['name', 'sex', 'amount', 'id'])
def test_series_columnwise():
s = Series([1, 2, 3], name='a')
t = symbol('t', 'var * {a: int64}')
result = compute(t.a + 1, s)
assert_series_equal(s + 1, result)
def test_symbol():
tm.assert_frame_equal(compute(t, df), df)
def test_projection():
tm.assert_frame_equal(compute(t[['name', 'id']], df),
df[['name', 'id']])
def test_eq():
assert_series_equal(compute(t['amount'] == 100, df),
df['amount'] == 100)
def test_selection():
tm.assert_frame_equal(compute(t[t['amount'] == 0], df),
df[df['amount'] == 0])
tm.assert_frame_equal(compute(t[t['amount'] > 150], df),
df[df['amount'] > 150])
def test_arithmetic():
assert_series_equal(compute(t['amount'] + t['id'], df),
df.amount + df.id)
assert_series_equal(compute(t['amount'] * t['id'], df),
df.amount * df.id)
assert_series_equal(compute(t['amount'] % t['id'], df),
df.amount % df.id)
def test_join():
left = DataFrame(
[['Alice', 100], ['Bob', 200]], columns=['name', 'amount'])
right = DataFrame([['Alice', 1], ['Bob', 2]], columns=['name', 'id'])
lsym = symbol('L', 'var * {name: string, amount: int}')
rsym = symbol('R', 'var * {name: string, id: int}')
joined = join(lsym, rsym, 'name')
assert (dshape(joined.schema) ==
dshape('{name: string, amount: int, id: int}'))
result = compute(joined, {lsym: left, rsym: right})
expected = DataFrame([['Alice', 100, 1], ['Bob', 200, 2]],
columns=['name', 'amount', 'id'])
tm.assert_frame_equal(result, expected)
assert list(result.columns) == list(joined.fields)
def test_multi_column_join():
left = [(1, 2, 3),
(2, 3, 4),
(1, 3, 5)]
left = DataFrame(left, columns=['x', 'y', 'z'])
right = [(1, 2, 30),
(1, 3, 50),
(1, 3, 150)]
right = DataFrame(right, columns=['x', 'y', 'w'])
lsym = symbol('lsym', 'var * {x: int, y: int, z: int}')
rsym = symbol('rsym', 'var * {x: int, y: int, w: int}')
j = join(lsym, rsym, ['x', 'y'])
expected = [(1, 2, 3, 30),
(1, 3, 5, 50),
(1, 3, 5, 150)]
expected = DataFrame(expected, columns=['x', 'y', 'z', 'w'])
result = compute(j, {lsym: left, rsym: right})
print(result)
tm.assert_frame_equal(result, expected)
assert list(result.columns) == list(j.fields)
def test_unary_op():
assert (compute(exp(t['amount']), df) == np.exp(df['amount'])).all()
def test_abs():
assert (compute(abs(t['amount']), df) == abs(df['amount'])).all()
def test_neg():
assert_series_equal(compute(-t['amount'], df),
-df['amount'])
@xfail(reason='Projection does not support arithmetic')
def test_neg_projection():
assert_series_equal(compute(-t[['amount', 'id']], df),
-df[['amount', 'id']])
def test_columns_series():
assert isinstance(compute(t['amount'], df), Series)
assert isinstance(compute(t['amount'] > 150, df), Series)
def test_reductions():
assert compute(mean(t['amount']), df) == 350 / 3
assert compute(count(t['amount']), df) == 3
assert compute(sum(t['amount']), df) == 100 + 200 + 50
assert compute(min(t['amount']), df) == 50
assert compute(max(t['amount']), df) == 200
assert compute(nunique(t['amount']), df) == 3
assert compute(nunique(t['name']), df) == 2
assert compute(any(t['amount'] > 150), df) is True
assert compute(any(t['amount'] > 250), df) is False
assert compute(var(t['amount']), df) == df.amount.var(ddof=0)
assert compute(var(t['amount'], unbiased=True), df) == df.amount.var()
assert compute(std(t['amount']), df) == df.amount.std(ddof=0)
assert compute(std(t['amount'], unbiased=True), df) == df.amount.std()
assert compute(t.amount[0], df) == df.amount.iloc[0]
assert compute(t.amount[-1], df) == df.amount.iloc[-1]
def test_reductions_on_dataframes():
assert compute(count(t), df) == 3
assert shape(compute(count(t, keepdims=True), df)) == (1,)
def test_1d_reductions_keepdims():
series = df['amount']
for r in [sum, min, max, nunique, count, std, var]:
result = compute(r(t.amount, keepdims=True), {t.amount: series})
assert type(result) == type(series)
def test_distinct():
dftoobig = DataFrame([['Alice', 'F', 100, 1],
['Alice', 'F', 100, 1],
['Alice', 'F', 100, 3],
['Drew', 'F', 100, 4],
['Drew', 'M', 100, 5],
['Drew', 'F', 100, 4],
['Drew', 'M', 100, 5],
['Drew', 'M', 200, 5],
['Drew', 'M', 200, 5]],
columns=['name', 'sex', 'amount', 'id'])
d_t = Distinct(tbig)
d_df = compute(d_t, dftoobig)
tm.assert_frame_equal(d_df, dfbig)
# Test idempotence
tm.assert_frame_equal(compute(d_t, d_df), d_df)
def test_by_one():
result = compute(by(t['name'], total=t['amount'].sum()), df)
expected = df.groupby('name')['amount'].sum().reset_index()
expected.columns = ['name', 'total']
tm.assert_frame_equal(result, expected)
def test_by_two():
result = compute(by(tbig[['name', 'sex']],
total=sum(tbig['amount'])), dfbig)
expected = DataFrame([['Alice', 'F', 200],
['Drew', 'F', 100],
['Drew', 'M', 300]],
columns=['name', 'sex', 'total'])
tm.assert_frame_equal(result, expected)
def test_by_three():
expr = by(tbig[['name', 'sex']],
total=(tbig['id'] + tbig['amount']).sum())
result = compute(expr, dfbig)
expected = DataFrame([['Alice', 'F', 204],
['Drew', 'F', 104],
['Drew', 'M', 310]], columns=['name', 'sex', 'total'])
expected.columns = expr.fields
tm.assert_frame_equal(result, expected)
def test_by_four():
t = tbig[['sex', 'amount']]
expr = by(t['sex'], max=t['amount'].max())
result = compute(expr, dfbig)
expected = DataFrame([['F', 100],
['M', 200]], columns=['sex', 'max'])
tm.assert_frame_equal(result, expected)
def test_join_by_arcs():
df_idx = DataFrame([['A', 1],
['B', 2],
['C', 3]],
columns=['name', 'node_id'])
df_arc = DataFrame([[1, 3],
[2, 3],
[3, 1]],
columns=['node_out', 'node_id'])
t_idx = symbol('t_idx', 'var * {name: string, node_id: int32}')
t_arc = symbol('t_arc', 'var * {node_out: int32, node_id: int32}')
joined = join(t_arc, t_idx, "node_id")
want = by(joined['name'], count=joined['node_id'].count())
result = compute(want, {t_arc: df_arc, t_idx: df_idx})
result_pandas = pd.merge(df_arc, df_idx, on='node_id')
gb = result_pandas.groupby('name')
expected = gb.node_id.count().reset_index().rename(columns={
'node_id': 'count'
})
tm.assert_frame_equal(result, expected)
assert list(result.columns) == ['name', 'count']
def test_join_suffixes():
df = pd.DataFrame(
list(dict((k, n) for k in ascii_lowercase[:5]) for n in range(5)),
)
a = symbol('a', discover(df))
b = symbol('b', discover(df))
suffixes = '_x', '_y'
joined = join(a, b, 'a', suffixes=suffixes)
expected = pd.merge(df, df, on='a', suffixes=suffixes)
result = compute(joined, {a: df, b: df})
tm.assert_frame_equal(result, expected)
def test_sort():
tm.assert_frame_equal(compute(t.sort('amount'), df),
df.sort('amount'))
tm.assert_frame_equal(compute(t.sort('amount', ascending=True), df),
df.sort('amount', ascending=True))
tm.assert_frame_equal(compute(t.sort(['amount', 'id']), df),
df.sort(['amount', 'id']))
def test_sort_on_series_no_warning(recwarn):
expected = df.amount.order()
recwarn.clear()
assert_series_equal(compute(t['amount'].sort('amount'), df),
expected)
    # recwarn.pop raises AssertionError if no warning was recorded; same below
with pytest.raises(AssertionError):
assert recwarn.pop(FutureWarning)
assert_series_equal(compute(t['amount'].sort(), df),
expected)
with pytest.raises(AssertionError):
assert recwarn.pop(FutureWarning)
def test_field_on_series():
expr = symbol('s', 'var * int')
data = Series([1, 2, 3, 4], name='s')
assert_series_equal(compute(expr.s, data), data)
def test_head():
tm.assert_frame_equal(compute(t.head(1), df), df.head(1))
def test_label():
expected = df['amount'] * 10
expected.name = 'foo'
assert_series_equal(compute((t['amount'] * 10).label('foo'), df),
expected)
def test_relabel():
result = compute(t.relabel({'name': 'NAME', 'id': 'ID'}), df)
expected = df.rename(columns={'name': 'NAME', 'id': 'ID'})
tm.assert_frame_equal(result, expected)
def test_relabel_series():
result = compute(t.relabel({'name': 'NAME'}), df.name)
assert result.name == 'NAME'
ts = pd.date_range('now', periods=10).to_series().reset_index(drop=True)
tframe = DataFrame({'timestamp': ts})
def test_map_column():
inc = lambda x: x + 1
result = compute(t['amount'].map(inc, 'int'), df)
expected = df['amount'] + 1
assert_series_equal(result, expected)
def test_map():
f = lambda _, amt, id: amt + id
result = compute(t.map(f, 'real'), df)
expected = df['amount'] + df['id']
assert_series_equal(result, expected)
def test_apply_column():
result = compute(t.amount.apply(np.sum, 'real'), df)
expected = np.sum(df['amount'])
assert result == expected
result = compute(t.amount.apply(builtins.sum, 'real'), df)
expected = builtins.sum(df['amount'])
assert result == expected
def test_apply():
result = compute(t.apply(str, 'string'), df)
expected = str(df)
assert result == expected
def test_merge():
col = (t['amount'] * 2).label('new')
expr = merge(t['name'], col)
expected = DataFrame([['Alice', 200],
['Bob', 400],
['Alice', 100]],
columns=['name', 'new'])
result = compute(expr, df)
tm.assert_frame_equal(result, expected)
def test_by_nunique():
result = compute(by(t['name'], count=t['id'].nunique()), df)
expected = DataFrame([['Alice', 2], ['Bob', 1]],
columns=['name', 'count'])
tm.assert_frame_equal(result, expected)
def test_selection_out_of_order():
expr = t['name'][t['amount'] < 100]
expected = df.loc[df.amount < 100, 'name']
result = compute(expr, df)
assert_series_equal(result, expected)
def test_outer_join():
left = [(1, 'Alice', 100),
(2, 'Bob', 200),
(4, 'Dennis', 400)]
left = DataFrame(left, columns=['id', 'name', 'amount'])
right = [('NYC', 1),
('Boston', 1),
('LA', 3),
('Moscow', 4)]
right = DataFrame(right, columns=['city', 'id'])
lsym = symbol('lsym', 'var * {id: int, name: string, amount: real}')
rsym = symbol('rsym', 'var * {city: string, id: int}')
convert = lambda df: set(df.to_records(index=False).tolist())
assert (convert(compute(join(lsym, rsym), {lsym: left, rsym: right})) ==
set([(1, 'Alice', 100, 'NYC'),
(1, 'Alice', 100, 'Boston'),
(4, 'Dennis', 400, 'Moscow')]))
assert (convert(compute(join(lsym, rsym, how='left'),
{lsym: left, rsym: right})) ==
set([(1, 'Alice', 100, 'NYC'),
(1, 'Alice', 100, 'Boston'),
(2, 'Bob', 200, np.nan),
(4, 'Dennis', 400, 'Moscow')]))
df = compute(join(lsym, rsym, how='right'), {lsym: left, rsym: right})
expected = DataFrame([(1., 'Alice', 100., 'NYC'),
(1., 'Alice', 100., 'Boston'),
(3., np.nan, np.nan, 'lsymA'),
(4., 'Dennis', 400., 'Moscow')],
columns=['id', 'name', 'amount', 'city'])
result = df.sort('id').to_records(index=False)
expected = expected.sort('id').to_records(index=False)
np.array_equal(result, expected)
df = compute(join(lsym, rsym, how='outer'), {lsym: left, rsym: right})
expected = DataFrame([(1., 'Alice', 100., 'NYC'),
(1., 'Alice', 100., 'Boston'),
(2., 'Bob', 200., np.nan),
(3., np.nan, np.nan, 'LA'),
(4., 'Dennis', 400., 'Moscow')],
columns=['id', 'name', 'amount', 'city'])
result = df.sort('id').to_records(index=False)
expected = expected.sort('id').to_records(index=False)
np.array_equal(result, expected)
def test_by_on_same_column():
df = pd.DataFrame([[1, 2], [1, 4], [2, 9]], columns=['id', 'value'])
t = symbol('data', 'var * {id: int, value: int}')
gby = by(t['id'], count=t['id'].count())
expected = DataFrame([[1, 2], [2, 1]], columns=['id', 'count'])
result = compute(gby, {t: df})
tm.assert_frame_equal(result, expected)
def test_summary_by():
expr = by(t.name, summary(count=t.id.count(), sum=t.amount.sum()))
result = compute(expr, df)
expected = DataFrame([['Alice', 2, 150],
['Bob', 1, 200]], columns=['name', 'count', 'sum'])
expr = by(t.name, summary(count=t.id.count(), sum=(t.amount + 1).sum()))
result = compute(expr, df)
expected = DataFrame([['Alice', 2, 152],
['Bob', 1, 201]], columns=['name', 'count', 'sum'])
tm.assert_frame_equal(result, expected)
@pytest.mark.xfail(raises=TypeError,
reason=('pandas backend cannot support non Reduction '
'subclasses'))
def test_summary_by_first():
expr = by(t.name, fst=t.amount[0])
result = compute(expr, df)
assert result == df.amount.iloc[0]
def test_summary_by_reduction_arithmetic():
expr = by(t.name, summary(count=t.id.count(), sum=t.amount.sum() + 1))
result = compute(expr, df)
expected = DataFrame([['Alice', 2, 151],
['Bob', 1, 201]], columns=['name', 'count', 'sum'])
tm.assert_frame_equal(result, expected)
def test_summary():
expr = summary(count=t.id.count(), sum=t.amount.sum())
assert_series_equal(compute(expr, df), Series({'count': 3, 'sum': 350}))
def test_summary_on_series():
ser = Series([1, 2, 3])
s = symbol('s', '3 * int')
expr = summary(max=s.max(), min=s.min())
assert compute(expr, ser) == (3, 1)
expr = summary(max=s.max(), min=s.min(), keepdims=True)
assert compute(expr, ser) == [(3, 1)]
def test_summary_keepdims():
expr = summary(count=t.id.count(), sum=t.amount.sum(), keepdims=True)
expected = DataFrame([[3, 350]], columns=['count', 'sum'])
tm.assert_frame_equal(compute(expr, df), expected)
def test_dplyr_transform():
df = DataFrame({'timestamp': pd.date_range('now', periods=5)})
t = symbol('t', discover(df))
expr = transform(t, date=t.timestamp.map(lambda x: x.date(),
schema='datetime'))
lhs = compute(expr, df)
rhs = pd.concat([df, Series(df.timestamp.map(lambda x: x.date()),
name='date').to_frame()], axis=1)
tm.assert_frame_equal(lhs, rhs)
def test_nested_transform():
d = {'timestamp': [1379613528, 1379620047], 'platform': ["Linux",
"Windows"]}
df = DataFrame(d)
t = symbol('t', discover(df))
t = transform(t, timestamp=t.timestamp.map(datetime.fromtimestamp,
schema='datetime'))
expr = transform(t, date=t.timestamp.map(lambda x: x.date(),
schema='datetime'))
result = compute(expr, df)
df['timestamp'] = df.timestamp.map(datetime.fromtimestamp)
df['date'] = df.timestamp.map(lambda x: x.date())
tm.assert_frame_equal(result, df)
def test_like():
expr = t.like(name='Alice*')
expected = DataFrame([['Alice', 100, 1],
['Alice', 50, 3]],
columns=['name', 'amount', 'id'])
result = compute(expr, df).reset_index(drop=True)
tm.assert_frame_equal(result, expected)
def test_strlen():
expr = t.name.strlen()
expected = pd.Series([5, 3, 5], name='name')
result = compute(expr, df).reset_index(drop=True)
assert_series_equal(expected, result)
def test_rowwise_by():
f = lambda _, id, name: id + len(name)
expr = by(t.map(f, 'int'), total=t.amount.sum())
df = pd.DataFrame({'id': [1, 1, 2],
'name': ['alice', 'wendy', 'bob'],
'amount': [100, 200, 300.03]})
expected = pd.DataFrame([(5, 300.03), (6, 300)], columns=expr.fields)
result = compute(expr, df)
tm.assert_frame_equal(result, expected)
def test_datetime_access():
df = DataFrame({'name': ['Alice', 'Bob', 'Joe'],
'when': [datetime(2010, 1, 1, 1, 1, 1)] * 3,
'amount': [100, 200, 300],
'id': [1, 2, 3]})
t = symbol('t', discover(df))
for attr in ['day', 'month', 'minute', 'second']:
expr = getattr(t.when, attr)
assert_series_equal(compute(expr, df),
Series([1, 1, 1], name=expr._name))
def test_frame_slice():
assert_series_equal(compute(t[0], df), df.iloc[0])
assert_series_equal(compute(t[2], df), df.iloc[2])
tm.assert_frame_equal(compute(t[:2], df), df.iloc[:2])
tm.assert_frame_equal(compute(t[1:3], df), df.iloc[1:3])
tm.assert_frame_equal(compute(t[1::2], df), df.iloc[1::2])
tm.assert_frame_equal(compute(t[[2, 0]], df), df.iloc[[2, 0]])
def test_series_slice():
assert compute(t.amount[0], df) == df.amount.iloc[0]
assert compute(t.amount[2], df) == df.amount.iloc[2]
assert_series_equal(compute(t.amount[:2], df), df.amount.iloc[:2])
assert_series_equal(compute(t.amount[1:3], df), df.amount.iloc[1:3])
assert_series_equal(compute(t.amount[1::2], df), df.amount.iloc[1::2])
def test_nelements():
assert compute(t.nelements(), df) == len(df)
assert compute(t.nrows, df) == len(df)
def test_datetime_truncation_minutes():
data = Series(['2000-01-01T12:10:00Z', '2000-06-25T12:35:12Z'],
dtype='M8[ns]')
s = symbol('s', 'var * datetime')
result = compute(s.truncate(20, 'minutes'), data)
expected = Series(['2000-01-01T12:00:00Z', '2000-06-25T12:20:00Z'],
dtype='M8[ns]', name='s')
assert_series_equal(result, expected)
def test_datetime_truncation_nanoseconds():
data = Series(['2000-01-01T12:10:00.000000005',
'2000-01-01T12:10:00.000000025'],
dtype='M8[ns]')
s = symbol('s', 'var * datetime')
expected = Series(['2000-01-01T12:10:00.000000000',
'2000-01-01T12:10:00.000000020'],
dtype='M8[ns]', name='s')
result = compute(s.truncate(nanoseconds=20), data)
assert_series_equal(result, expected)
def test_datetime_truncation_weeks():
data = Series(['2000-01-01T12:10:00Z', '2000-06-25T12:35:12Z'],
dtype='M8[ns]')
s = symbol('s', 'var * datetime')
result = compute(s.truncate(2, 'weeks'), data)
expected = Series(['1999-12-19', '2000-06-18'], dtype='M8[ns]', name='s')
assert_series_equal(result, expected)
def test_datetime_truncation_days():
data = Series(['2000-01-01T12:10:00Z', '2000-06-25T12:35:12Z'],
dtype='M8[ns]')
s = symbol('s', 'var * datetime')
result = compute(s.truncate(days=3), data)
expected = Series(['1999-12-31', '2000-06-25'], dtype='M8[ns]', name='s')
assert_series_equal(result, expected)
def test_datetime_truncation_same_as_python():
data = Series(['2000-01-01T12:10:00Z', '2000-06-25T12:35:12Z'],
dtype='M8[ns]')
s = symbol('s', 'var * datetime')
assert (compute(s.truncate(weeks=2), data[0].to_pydatetime()) ==
datetime(1999, 12, 26).date())
def test_complex_group_by():
expr = by(merge(tbig.amount // 10, tbig.id % 2),
count=tbig.name.count())
result = compute(expr, dfbig) # can we do this? yes we can!
expected = dfbig.groupby([dfbig.amount // 10,
dfbig.id % 2])['name'].count().reset_index()
expected = expected.rename(columns={'name': 'count'})
tm.assert_frame_equal(result, expected)
def test_by_with_complex_summary():
expr = by(t.name, total=t.amount.sum() + t.id.sum() - 1, a=t.id.min())
result = compute(expr, df)
assert list(result.columns) == expr.fields
assert list(result.total) == [150 + 4 - 1, 200 + 2 - 1]
@pytest.mark.parametrize('keys', [[1], [2, 3]])
def test_isin(keys):
expr = t[t.id.isin(keys)]
result = compute(expr, df)
expected = df.loc[df.id.isin(keys)]
tm.assert_frame_equal(result, expected)
def test_nunique_table():
expr = t.nunique()
result = compute(expr, df)
assert result == len(df.drop_duplicates())
def test_str_concat():
a = Series(('a', 'b', 'c'))
s = symbol('s', "3 * string[1, 'U32']")
expr = s + 'a'
assert (compute(expr, a) == (a + 'a')).all()
def test_str_repeat():
a = Series(('a', 'b', 'c'))
s = symbol('s', "3 * string[1, 'U32']")
expr = s.repeat(3)
assert (compute(expr, a) == (a * 3)).all()
def test_str_interp():
a = Series(('%s', '%s', '%s'))
s = symbol('s', "3 * string[1, 'U32']")
expr = s.interp(1)
assert (compute(expr, a) == (a % 1)).all()
def test_timedelta_arith():
series = Series(pd.date_range('2014-01-01', '2014-02-01'))
sym = symbol('s', discover(series))
delta = timedelta(days=1)
assert (compute(sym + delta, series) == series + delta).all()
assert (compute(sym - delta, series) == series - delta).all()
def test_coerce_series():
s = pd.Series(list('123'), name='a')
t = symbol('t', discover(s))
result = compute(t.coerce(to='int64'), s)
expected = pd.Series([1, 2, 3], name=s.name)
assert_series_equal(result, expected)
def test_concat_arr():
s_data = Series(np.arange(15))
t_data = Series(np.arange(15, 30))
s = symbol('s', discover(s_data))
t = symbol('t', discover(t_data))
assert (
compute(concat(s, t), {s: s_data, t: t_data}) == Series(np.arange(30))
).all()
def test_concat_mat():
s_data = DataFrame(np.arange(15).reshape(5, 3), columns=list('abc'))
t_data = DataFrame(np.arange(15, 30).reshape(5, 3), columns=list('abc'))
s = symbol('s', discover(s_data))
t = symbol('t', discover(t_data))
tm.assert_frame_equal(
compute(concat(s, t), {s: s_data, t: t_data}),
pd.DataFrame(np.arange(30).reshape(10, 3), columns=list('abc')),
)
def test_count_keepdims_frame():
df = pd.DataFrame(dict(a=[1, 2, 3, np.nan]))
s = symbol('s', discover(df))
assert_series_equal(compute(s.count(keepdims=True), df),
pd.Series([df.shape[0]], name='s_count'))
| {
"repo_name": "dwillmer/blaze",
"path": "blaze/compute/tests/test_pandas_compute.py",
"copies": "1",
"size": "24994",
"license": "bsd-3-clause",
"hash": -8789827874719136000,
"line_mean": 32.0171730515,
"line_max": 80,
"alpha_frac": 0.5355685364,
"autogenerated": false,
"ratio": 3.262924281984334,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9295501182625516,
"avg_score": 0.0005983271517634761,
"num_lines": 757
} |
from __future__ import absolute_import, division, print_function
import pytest
from datetime import datetime, timedelta
import numpy as np
import pandas as pd
import pandas.util.testing as tm
from pandas import DataFrame, Series
from string import ascii_lowercase
from blaze.compute.core import compute
from blaze import dshape, discover, transform
from blaze.expr import symbol, join, by, summary, distinct, shape
from blaze.expr import (merge, exp, mean, count, nunique, sum, min, max, any,
var, std, concat)
from blaze.compatibility import builtins, xfail, assert_series_equal
t = symbol('t', 'var * {name: string, amount: int, id: int}')
nt = symbol('t', 'var * {name: ?string, amount: float64, id: int}')
df = DataFrame([['Alice', 100, 1],
['Bob', 200, 2],
['Alice', 50, 3]], columns=['name', 'amount', 'id'])
ndf = DataFrame([['Alice', 100.0, 1],
['Bob', np.nan, 2],
[np.nan, 50.0, 3]], columns=['name', 'amount', 'id'])
tbig = symbol('tbig',
'var * {name: string, sex: string[1], amount: int, id: int}')
dfbig = DataFrame([['Alice', 'F', 100, 1],
['Alice', 'F', 100, 3],
['Drew', 'F', 100, 4],
['Drew', 'M', 100, 5],
['Drew', 'M', 200, 5]],
columns=['name', 'sex', 'amount', 'id'])
def test_series_columnwise():
s = Series([1, 2, 3], name='a')
t = symbol('t', 'var * {a: int64}')
result = compute(t.a + 1, s)
assert_series_equal(s + 1, result)
def test_symbol():
tm.assert_frame_equal(compute(t, df), df)
def test_projection():
tm.assert_frame_equal(compute(t[['name', 'id']], df),
df[['name', 'id']])
def test_eq():
assert_series_equal(compute(t['amount'] == 100, df),
df['amount'] == 100)
def test_selection():
tm.assert_frame_equal(compute(t[t['amount'] == 0], df),
df[df['amount'] == 0])
tm.assert_frame_equal(compute(t[t['amount'] > 150], df),
df[df['amount'] > 150])
def test_arithmetic():
assert_series_equal(compute(t['amount'] + t['id'], df),
df.amount + df.id)
assert_series_equal(compute(t['amount'] * t['id'], df),
df.amount * df.id)
assert_series_equal(compute(t['amount'] % t['id'], df),
df.amount % df.id)
def test_join():
left = DataFrame(
[['Alice', 100], ['Bob', 200]], columns=['name', 'amount'])
right = DataFrame([['Alice', 1], ['Bob', 2]], columns=['name', 'id'])
lsym = symbol('L', 'var * {name: string, amount: int}')
rsym = symbol('R', 'var * {name: string, id: int}')
joined = join(lsym, rsym, 'name')
assert (dshape(joined.schema) ==
dshape('{name: string, amount: int, id: int}'))
result = compute(joined, {lsym: left, rsym: right})
expected = DataFrame([['Alice', 100, 1], ['Bob', 200, 2]],
columns=['name', 'amount', 'id'])
tm.assert_frame_equal(result, expected)
assert list(result.columns) == list(joined.fields)
def test_multi_column_join():
left = [(1, 2, 3),
(2, 3, 4),
(1, 3, 5)]
left = DataFrame(left, columns=['x', 'y', 'z'])
right = [(1, 2, 30),
(1, 3, 50),
(1, 3, 150)]
right = DataFrame(right, columns=['x', 'y', 'w'])
lsym = symbol('lsym', 'var * {x: int, y: int, z: int}')
rsym = symbol('rsym', 'var * {x: int, y: int, w: int}')
j = join(lsym, rsym, ['x', 'y'])
expected = [(1, 2, 3, 30),
(1, 3, 5, 50),
(1, 3, 5, 150)]
expected = DataFrame(expected, columns=['x', 'y', 'z', 'w'])
result = compute(j, {lsym: left, rsym: right})
print(result)
tm.assert_frame_equal(result, expected)
assert list(result.columns) == list(j.fields)
def test_unary_op():
assert (compute(exp(t['amount']), df) == np.exp(df['amount'])).all()
def test_abs():
assert (compute(abs(t['amount']), df) == abs(df['amount'])).all()
def test_neg():
assert_series_equal(compute(-t['amount'], df),
-df['amount'])
@xfail(reason='Projection does not support arithmetic')
def test_neg_projection():
assert_series_equal(compute(-t[['amount', 'id']], df),
-df[['amount', 'id']])
def test_columns_series():
assert isinstance(compute(t['amount'], df), Series)
assert isinstance(compute(t['amount'] > 150, df), Series)
def test_reductions():
assert compute(mean(t['amount']), df) == 350 / 3
assert compute(count(t['amount']), df) == 3
assert compute(sum(t['amount']), df) == 100 + 200 + 50
assert compute(min(t['amount']), df) == 50
assert compute(max(t['amount']), df) == 200
assert compute(nunique(t['amount']), df) == 3
assert compute(nunique(t['name']), df) == 2
assert compute(any(t['amount'] > 150), df) is True
assert compute(any(t['amount'] > 250), df) is False
assert compute(var(t['amount']), df) == df.amount.var(ddof=0)
assert compute(var(t['amount'], unbiased=True), df) == df.amount.var()
assert compute(std(t['amount']), df) == df.amount.std(ddof=0)
assert compute(std(t['amount'], unbiased=True), df) == df.amount.std()
assert compute(t.amount[0], df) == df.amount.iloc[0]
assert compute(t.amount[-1], df) == df.amount.iloc[-1]
def test_reductions_on_dataframes():
assert compute(count(t), df) == 3
assert shape(compute(count(t, keepdims=True), df)) == (1,)
def test_1d_reductions_keepdims():
series = df['amount']
for r in [sum, min, max, nunique, count, std, var]:
result = compute(r(t.amount, keepdims=True), {t.amount: series})
assert type(result) == type(series)
def test_distinct():
dftoobig = DataFrame([['Alice', 'F', 100, 1],
['Alice', 'F', 100, 1],
['Alice', 'F', 100, 3],
['Drew', 'F', 100, 4],
['Drew', 'M', 100, 5],
['Drew', 'F', 100, 4],
['Drew', 'M', 100, 5],
['Drew', 'M', 200, 5],
['Drew', 'M', 200, 5]],
columns=['name', 'sex', 'amount', 'id'])
d_t = distinct(tbig)
d_df = compute(d_t, dftoobig)
tm.assert_frame_equal(d_df, dfbig)
# Test idempotence
tm.assert_frame_equal(compute(d_t, d_df), d_df)
def test_distinct_on():
cols = ['name', 'sex', 'amount', 'id']
df = DataFrame([['Alice', 'F', 100, 1],
['Alice', 'F', 100, 3],
['Drew', 'F', 100, 4],
['Drew', 'M', 100, 5],
['Drew', 'F', 100, 4],
['Drew', 'M', 100, 5],
['Drew', 'M', 200, 5]],
columns=cols)
s = symbol('s', discover(df))
computed = compute(s.distinct('sex'), df)
tm.assert_frame_equal(
computed,
pd.DataFrame([['Alice', 'F', 100, 1],
['Drew', 'M', 100, 5]],
columns=cols),
)
def test_by_one():
result = compute(by(t['name'], total=t['amount'].sum()), df)
expected = df.groupby('name')['amount'].sum().reset_index()
expected.columns = ['name', 'total']
tm.assert_frame_equal(result, expected)
def test_by_two():
result = compute(by(tbig[['name', 'sex']],
total=sum(tbig['amount'])), dfbig)
expected = DataFrame([['Alice', 'F', 200],
['Drew', 'F', 100],
['Drew', 'M', 300]],
columns=['name', 'sex', 'total'])
tm.assert_frame_equal(result, expected)
def test_by_three():
expr = by(tbig[['name', 'sex']],
total=(tbig['id'] + tbig['amount']).sum())
result = compute(expr, dfbig)
expected = DataFrame([['Alice', 'F', 204],
['Drew', 'F', 104],
['Drew', 'M', 310]], columns=['name', 'sex', 'total'])
expected.columns = expr.fields
tm.assert_frame_equal(result, expected)
def test_by_four():
t = tbig[['sex', 'amount']]
expr = by(t['sex'], max=t['amount'].max())
result = compute(expr, dfbig)
expected = DataFrame([['F', 100],
['M', 200]], columns=['sex', 'max'])
tm.assert_frame_equal(result, expected)
def test_join_by_arcs():
df_idx = DataFrame([['A', 1],
['B', 2],
['C', 3]],
columns=['name', 'node_id'])
df_arc = DataFrame([[1, 3],
[2, 3],
[3, 1]],
columns=['node_out', 'node_id'])
t_idx = symbol('t_idx', 'var * {name: string, node_id: int32}')
t_arc = symbol('t_arc', 'var * {node_out: int32, node_id: int32}')
joined = join(t_arc, t_idx, "node_id")
want = by(joined['name'], count=joined['node_id'].count())
result = compute(want, {t_arc: df_arc, t_idx: df_idx})
result_pandas = pd.merge(df_arc, df_idx, on='node_id')
gb = result_pandas.groupby('name')
expected = gb.node_id.count().reset_index().rename(columns={
'node_id': 'count'
})
tm.assert_frame_equal(result, expected)
assert list(result.columns) == ['name', 'count']
def test_join_suffixes():
df = pd.DataFrame(
list(dict((k, n) for k in ascii_lowercase[:5]) for n in range(5)),
)
a = symbol('a', discover(df))
b = symbol('b', discover(df))
suffixes = '_x', '_y'
joined = join(a, b, 'a', suffixes=suffixes)
expected = pd.merge(df, df, on='a', suffixes=suffixes)
result = compute(joined, {a: df, b: df})
tm.assert_frame_equal(result, expected)
def test_join_promotion():
a_data = pd.DataFrame([[0.0, 1.5], [1.0, 2.5]], columns=list('ab'))
b_data = pd.DataFrame([[0, 1], [1, 2]], columns=list('ac'))
a = symbol('a', discover(a_data))
b = symbol('b', discover(b_data))
joined = join(a, b, 'a')
assert joined.dshape == dshape('var * {a: float64, b: ?float64, c: int64}')
expected = pd.merge(a_data, b_data, on='a')
result = compute(joined, {a: a_data, b: b_data})
tm.assert_frame_equal(result, expected)
def test_sort():
tm.assert_frame_equal(compute(t.sort('amount'), df),
df.sort('amount'))
tm.assert_frame_equal(compute(t.sort('amount', ascending=True), df),
df.sort('amount', ascending=True))
tm.assert_frame_equal(compute(t.sort(['amount', 'id']), df),
df.sort(['amount', 'id']))
def test_sort_on_series_no_warning(recwarn):
expected = df.amount.order()
recwarn.clear()
assert_series_equal(compute(t['amount'].sort('amount'), df),
expected)
# raises as assertion error if no warning occurs, same thing for below
with pytest.raises(AssertionError):
assert recwarn.pop(FutureWarning)
assert_series_equal(compute(t['amount'].sort(), df),
expected)
with pytest.raises(AssertionError):
assert recwarn.pop(FutureWarning)
def test_field_on_series():
expr = symbol('s', 'var * int')
data = Series([1, 2, 3, 4], name='s')
assert_series_equal(compute(expr.s, data), data)
def test_head():
tm.assert_frame_equal(compute(t.head(1), df), df.head(1))
def test_tail():
tm.assert_frame_equal(compute(t.tail(1), df), df.tail(1))
def test_label():
expected = df['amount'] * 10
expected.name = 'foo'
assert_series_equal(compute((t['amount'] * 10).label('foo'), df),
expected)
def test_relabel():
result = compute(t.relabel({'name': 'NAME', 'id': 'ID'}), df)
expected = df.rename(columns={'name': 'NAME', 'id': 'ID'})
tm.assert_frame_equal(result, expected)
def test_relabel_series():
result = compute(t.relabel({'name': 'NAME'}), df.name)
assert result.name == 'NAME'
ts = pd.date_range('now', periods=10).to_series().reset_index(drop=True)
tframe = DataFrame({'timestamp': ts})
def test_map_column():
inc = lambda x: x + 1
result = compute(t['amount'].map(inc, 'int'), df)
expected = df['amount'] + 1
assert_series_equal(result, expected)
def test_map():
f = lambda _, amt, id: amt + id
result = compute(t.map(f, 'real'), df)
expected = df['amount'] + df['id']
assert_series_equal(result, expected)
def test_apply_column():
result = compute(t.amount.apply(np.sum, 'real'), df)
expected = np.sum(df['amount'])
assert result == expected
result = compute(t.amount.apply(builtins.sum, 'real'), df)
expected = builtins.sum(df['amount'])
assert result == expected
def test_apply():
result = compute(t.apply(str, 'string'), df)
expected = str(df)
assert result == expected
def test_merge():
col = (t['amount'] * 2).label('new')
expr = merge(t['name'], col)
expected = DataFrame([['Alice', 200],
['Bob', 400],
['Alice', 100]],
columns=['name', 'new'])
result = compute(expr, df)
tm.assert_frame_equal(result, expected)
def test_by_nunique():
result = compute(by(t['name'], count=t['id'].nunique()), df)
expected = DataFrame([['Alice', 2], ['Bob', 1]],
columns=['name', 'count'])
tm.assert_frame_equal(result, expected)
def test_selection_out_of_order():
expr = t['name'][t['amount'] < 100]
expected = df.loc[df.amount < 100, 'name']
result = compute(expr, df)
assert_series_equal(result, expected)
def test_outer_join():
left = [(1, 'Alice', 100),
(2, 'Bob', 200),
(4, 'Dennis', 400)]
left = DataFrame(left, columns=['id', 'name', 'amount'])
right = [('NYC', 1),
('Boston', 1),
('LA', 3),
('Moscow', 4)]
right = DataFrame(right, columns=['city', 'id'])
lsym = symbol('lsym', 'var * {id: int, name: string, amount: real}')
rsym = symbol('rsym', 'var * {city: string, id: int}')
convert = lambda df: set(df.to_records(index=False).tolist())
assert (convert(compute(join(lsym, rsym), {lsym: left, rsym: right})) ==
set([(1, 'Alice', 100, 'NYC'),
(1, 'Alice', 100, 'Boston'),
(4, 'Dennis', 400, 'Moscow')]))
assert (convert(compute(join(lsym, rsym, how='left'),
{lsym: left, rsym: right})) ==
set([(1, 'Alice', 100, 'NYC'),
(1, 'Alice', 100, 'Boston'),
(2, 'Bob', 200, np.nan),
(4, 'Dennis', 400, 'Moscow')]))
df = compute(join(lsym, rsym, how='right'), {lsym: left, rsym: right})
expected = DataFrame([(1., 'Alice', 100., 'NYC'),
(1., 'Alice', 100., 'Boston'),
                          (3., np.nan, np.nan, 'LA'),
(4., 'Dennis', 400., 'Moscow')],
columns=['id', 'name', 'amount', 'city'])
result = df.sort('id').to_records(index=False)
expected = expected.sort('id').to_records(index=False)
np.array_equal(result, expected)
df = compute(join(lsym, rsym, how='outer'), {lsym: left, rsym: right})
expected = DataFrame([(1., 'Alice', 100., 'NYC'),
(1., 'Alice', 100., 'Boston'),
(2., 'Bob', 200., np.nan),
(3., np.nan, np.nan, 'LA'),
(4., 'Dennis', 400., 'Moscow')],
columns=['id', 'name', 'amount', 'city'])
result = df.sort('id').to_records(index=False)
expected = expected.sort('id').to_records(index=False)
np.array_equal(result, expected)
def test_by_on_same_column():
df = pd.DataFrame([[1, 2], [1, 4], [2, 9]], columns=['id', 'value'])
t = symbol('data', 'var * {id: int, value: int}')
gby = by(t['id'], count=t['id'].count())
expected = DataFrame([[1, 2], [2, 1]], columns=['id', 'count'])
result = compute(gby, {t: df})
tm.assert_frame_equal(result, expected)
def test_summary_by():
expr = by(t.name, summary(count=t.id.count(), sum=t.amount.sum()))
result = compute(expr, df)
expected = DataFrame([['Alice', 2, 150],
['Bob', 1, 200]], columns=['name', 'count', 'sum'])
expr = by(t.name, summary(count=t.id.count(), sum=(t.amount + 1).sum()))
result = compute(expr, df)
expected = DataFrame([['Alice', 2, 152],
['Bob', 1, 201]], columns=['name', 'count', 'sum'])
tm.assert_frame_equal(result, expected)
@pytest.mark.xfail(raises=TypeError,
reason=('pandas backend cannot support non Reduction '
'subclasses'))
def test_summary_by_first():
expr = by(t.name, fst=t.amount[0])
result = compute(expr, df)
assert result == df.amount.iloc[0]
def test_summary_by_reduction_arithmetic():
expr = by(t.name, summary(count=t.id.count(), sum=t.amount.sum() + 1))
result = compute(expr, df)
expected = DataFrame([['Alice', 2, 151],
['Bob', 1, 201]], columns=['name', 'count', 'sum'])
tm.assert_frame_equal(result, expected)
def test_summary():
expr = summary(count=t.id.count(), sum=t.amount.sum())
assert_series_equal(compute(expr, df), Series({'count': 3, 'sum': 350}))
def test_summary_on_series():
ser = Series([1, 2, 3])
s = symbol('s', '3 * int')
expr = summary(max=s.max(), min=s.min())
assert compute(expr, ser) == (3, 1)
expr = summary(max=s.max(), min=s.min(), keepdims=True)
assert compute(expr, ser) == [(3, 1)]
def test_summary_keepdims():
expr = summary(count=t.id.count(), sum=t.amount.sum(), keepdims=True)
expected = DataFrame([[3, 350]], columns=['count', 'sum'])
tm.assert_frame_equal(compute(expr, df), expected)
def test_dplyr_transform():
df = DataFrame({'timestamp': pd.date_range('now', periods=5)})
t = symbol('t', discover(df))
expr = transform(t, date=t.timestamp.map(lambda x: x.date(),
schema='datetime'))
lhs = compute(expr, df)
rhs = pd.concat([df, Series(df.timestamp.map(lambda x: x.date()),
name='date').to_frame()], axis=1)
tm.assert_frame_equal(lhs, rhs)
def test_nested_transform():
d = {'timestamp': [1379613528, 1379620047], 'platform': ["Linux",
"Windows"]}
df = DataFrame(d)
t = symbol('t', discover(df))
t = transform(t, timestamp=t.timestamp.map(datetime.fromtimestamp,
schema='datetime'))
expr = transform(t, date=t.timestamp.map(lambda x: x.date(),
schema='datetime'))
result = compute(expr, df)
df['timestamp'] = df.timestamp.map(datetime.fromtimestamp)
df['date'] = df.timestamp.map(lambda x: x.date())
tm.assert_frame_equal(result, df)
def test_like():
expr = t.like(name='Alice*')
expected = DataFrame([['Alice', 100, 1],
['Alice', 50, 3]],
columns=['name', 'amount', 'id'])
result = compute(expr, df).reset_index(drop=True)
tm.assert_frame_equal(result, expected)
def test_strlen():
expr = t.name.strlen()
expected = pd.Series([5, 3, 5], name='name')
result = compute(expr, df).reset_index(drop=True)
assert_series_equal(expected, result)
def test_rowwise_by():
f = lambda _, id, name: id + len(name)
expr = by(t.map(f, 'int'), total=t.amount.sum())
df = pd.DataFrame({'id': [1, 1, 2],
'name': ['alice', 'wendy', 'bob'],
'amount': [100, 200, 300.03]})
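    # grouping key is id + len(name): bob -> 2 + 3 = 5, alice and wendy -> 1 + 5 = 6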
expected = pd.DataFrame([(5, 300.03), (6, 300)], columns=expr.fields)
result = compute(expr, df)
tm.assert_frame_equal(result, expected)
def test_datetime_access():
df = DataFrame({'name': ['Alice', 'Bob', 'Joe'],
'when': [datetime(2010, 1, 1, 1, 1, 1)] * 3,
'amount': [100, 200, 300],
'id': [1, 2, 3]})
t = symbol('t', discover(df))
for attr in ['day', 'month', 'minute', 'second']:
expr = getattr(t.when, attr)
assert_series_equal(compute(expr, df),
Series([1, 1, 1], name=expr._name))
def test_frame_slice():
assert_series_equal(compute(t[0], df), df.iloc[0])
assert_series_equal(compute(t[2], df), df.iloc[2])
tm.assert_frame_equal(compute(t[:2], df), df.iloc[:2])
tm.assert_frame_equal(compute(t[1:3], df), df.iloc[1:3])
tm.assert_frame_equal(compute(t[1::2], df), df.iloc[1::2])
tm.assert_frame_equal(compute(t[[2, 0]], df), df.iloc[[2, 0]])
def test_series_slice():
assert compute(t.amount[0], df) == df.amount.iloc[0]
assert compute(t.amount[2], df) == df.amount.iloc[2]
assert_series_equal(compute(t.amount[:2], df), df.amount.iloc[:2])
assert_series_equal(compute(t.amount[1:3], df), df.amount.iloc[1:3])
assert_series_equal(compute(t.amount[1::2], df), df.amount.iloc[1::2])
def test_nelements():
assert compute(t.nelements(), df) == len(df)
assert compute(t.nrows, df) == len(df)
def test_datetime_truncation_minutes():
data = Series(['2000-01-01T12:10:00Z', '2000-06-25T12:35:12Z'],
dtype='M8[ns]')
s = symbol('s', 'var * datetime')
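    # truncate(20, 'minutes') floors each timestamp to a 20-minute multiple: 12:10 -> 12:00, 12:35:12 -> 12:20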
result = compute(s.truncate(20, 'minutes'), data)
expected = Series(['2000-01-01T12:00:00Z', '2000-06-25T12:20:00Z'],
dtype='M8[ns]', name='s')
assert_series_equal(result, expected)
def test_datetime_truncation_nanoseconds():
data = Series(['2000-01-01T12:10:00.000000005',
'2000-01-01T12:10:00.000000025'],
dtype='M8[ns]')
s = symbol('s', 'var * datetime')
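    # flooring to 20-nanosecond multiples: 5ns -> 0ns, 25ns -> 20ns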
expected = Series(['2000-01-01T12:10:00.000000000',
'2000-01-01T12:10:00.000000020'],
dtype='M8[ns]', name='s')
result = compute(s.truncate(nanoseconds=20), data)
assert_series_equal(result, expected)
def test_datetime_truncation_weeks():
data = Series(['2000-01-01T12:10:00Z', '2000-06-25T12:35:12Z'],
dtype='M8[ns]')
s = symbol('s', 'var * datetime')
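    # 2-week truncation floors each date into its two-week bucket: 2000-01-01 -> 1999-12-19, 2000-06-25 -> 2000-06-18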
result = compute(s.truncate(2, 'weeks'), data)
expected = Series(['1999-12-19', '2000-06-18'], dtype='M8[ns]', name='s')
assert_series_equal(result, expected)
def test_datetime_truncation_days():
data = Series(['2000-01-01T12:10:00Z', '2000-06-25T12:35:12Z'],
dtype='M8[ns]')
s = symbol('s', 'var * datetime')
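    # 3-day truncation floors to the bucket start: 2000-01-01 -> 1999-12-31, while 2000-06-25 already sits on a boundary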
result = compute(s.truncate(days=3), data)
expected = Series(['1999-12-31', '2000-06-25'], dtype='M8[ns]', name='s')
assert_series_equal(result, expected)
def test_datetime_truncation_same_as_python():
data = Series(['2000-01-01T12:10:00Z', '2000-06-25T12:35:12Z'],
dtype='M8[ns]')
s = symbol('s', 'var * datetime')
assert (compute(s.truncate(weeks=2), data[0].to_pydatetime()) ==
datetime(1999, 12, 26).date())
def test_complex_group_by():
expr = by(merge(tbig.amount // 10, tbig.id % 2),
count=tbig.name.count())
result = compute(expr, dfbig) # can we do this? yes we can!
expected = dfbig.groupby([dfbig.amount // 10,
dfbig.id % 2])['name'].count().reset_index()
expected = expected.rename(columns={'name': 'count'})
tm.assert_frame_equal(result, expected)
def test_by_with_complex_summary():
expr = by(t.name, total=t.amount.sum() + t.id.sum() - 1, a=t.id.min())
result = compute(expr, df)
assert list(result.columns) == expr.fields
assert list(result.total) == [150 + 4 - 1, 200 + 2 - 1]
def test_notnull():
assert (compute(nt.name.notnull(), ndf) == ndf.name.notnull()).all()
def test_isnan():
assert (compute(nt.amount.isnan(), ndf) == ndf.amount.isnull()).all()
@pytest.mark.parametrize('keys', [[1], [2, 3]])
def test_isin(keys):
expr = t[t.id.isin(keys)]
result = compute(expr, df)
expected = df.loc[df.id.isin(keys)]
tm.assert_frame_equal(result, expected)
def test_nunique_table():
expr = t.nunique()
result = compute(expr, df)
assert result == len(df.drop_duplicates())
def test_str_concat():
a = Series(('a', 'b', 'c'))
s = symbol('s', "3 * string[1, 'U32']")
expr = s + 'a'
assert (compute(expr, a) == (a + 'a')).all()
def test_str_repeat():
a = Series(('a', 'b', 'c'))
s = symbol('s', "3 * string[1, 'U32']")
expr = s.repeat(3)
assert (compute(expr, a) == (a * 3)).all()
def test_str_interp():
a = Series(('%s', '%s', '%s'))
s = symbol('s', "3 * string[1, 'U32']")
expr = s.interp(1)
assert (compute(expr, a) == (a % 1)).all()
def test_timedelta_arith():
series = Series(pd.date_range('2014-01-01', '2014-02-01'))
sym = symbol('s', discover(series))
delta = timedelta(days=1)
assert (compute(sym + delta, series) == series + delta).all()
assert (compute(sym - delta, series) == series - delta).all()
def test_coerce_series():
s = pd.Series(list('123'), name='a')
t = symbol('t', discover(s))
result = compute(t.coerce(to='int64'), s)
expected = pd.Series([1, 2, 3], name=s.name)
assert_series_equal(result, expected)
def test_concat_arr():
s_data = Series(np.arange(15))
t_data = Series(np.arange(15, 30))
s = symbol('s', discover(s_data))
t = symbol('t', discover(t_data))
assert (
compute(concat(s, t), {s: s_data, t: t_data}) == Series(np.arange(30))
).all()
def test_concat_mat():
s_data = DataFrame(np.arange(15).reshape(5, 3), columns=list('abc'))
t_data = DataFrame(np.arange(15, 30).reshape(5, 3), columns=list('abc'))
s = symbol('s', discover(s_data))
t = symbol('t', discover(t_data))
tm.assert_frame_equal(
compute(concat(s, t), {s: s_data, t: t_data}),
pd.DataFrame(np.arange(30).reshape(10, 3), columns=list('abc')),
)
def test_count_keepdims_frame():
df = pd.DataFrame(dict(a=[1, 2, 3, np.nan]))
s = symbol('s', discover(df))
assert_series_equal(compute(s.count(keepdims=True), df),
pd.Series([df.shape[0]], name='s_count'))
def test_time_field():
data = pd.Series(pd.date_range(start='20120101', end='20120102', freq='H'))
s = symbol('s', discover(data))
result = compute(s.time, data)
expected = data.dt.time
expected.name = 's_time'
assert_series_equal(result, expected)
| {
"repo_name": "alexmojaki/blaze",
"path": "blaze/compute/tests/test_pandas_compute.py",
"copies": "1",
"size": "26918",
"license": "bsd-3-clause",
"hash": 3328872096729691000,
"line_mean": 31.8669108669,
"line_max": 80,
"alpha_frac": 0.5332119771,
"autogenerated": false,
"ratio": 3.2470446320868516,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9277326446299721,
"avg_score": 0.0005860325774261032,
"num_lines": 819
} |
from __future__ import absolute_import, division, print_function
import pytest
from flask import json
from datetime import datetime
from pandas import DataFrame
from blaze.utils import example
from blaze import discover, Symbol, by, CSV, compute, join, into
from blaze.server.server import Server, to_tree, from_tree
from blaze.server.index import emit_index
accounts = DataFrame([['Alice', 100], ['Bob', 200]],
columns=['name', 'amount'])
cities = DataFrame([['Alice', 'NYC'], ['Bob', 'LA']],
columns=['name', 'city'])
server = Server(datasets={'accounts': accounts,
'cities': cities})
test = server.app.test_client()
def test_datasets():
response = test.get('/datasets.json')
assert json.loads(response.data) == {'accounts': str(discover(accounts)),
'cities': str(discover(cities))}
def test_bad_responses():
assert 'OK' not in test.post('/compute/accounts.json',
                                 data=json.dumps(500),
content_type='application/json').status
assert 'OK' not in test.post('/compute/non-existent-table.json',
                                 data=json.dumps(0),
content_type='application/json').status
assert 'OK' not in test.post('/compute/accounts.json').status
def test_to_from_json():
t = Symbol('t', 'var * {name: string, amount: int}')
assert from_tree(to_tree(t)).isidentical(t)
assert from_tree(to_tree(t.amount + 1)).isidentical(t.amount + 1)
def test_to_tree():
t = Symbol('t', 'var * {name: string, amount: int32}')
expr = t.amount.sum()
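    # the serialized sum node lists all of its args: the Field child, then axis ([0]) and keepdims (False)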
expected = {'op': 'sum',
'args': [{'op': 'Field',
'args':
[
{'op': 'Symbol',
'args': [
't',
'var * { name : string, amount : int32 }',
]
},
'amount'
]
}, [0], False]
}
assert to_tree(expr) == expected
def test_to_from_tree_namespace():
t = Symbol('t', 'var * {name: string, amount: int32}')
expr = t.name
tree = to_tree(expr, names={t: 't'})
assert tree == {'op': 'Field', 'args': ['t', 'name']}
new = from_tree(tree, namespace={'t': t})
assert new.isidentical(expr)
def test_from_tree_is_robust_to_unnecessary_namespace():
t = Symbol('t', 'var * {name: string, amount: int32}')
expr = t.amount + 1
tree = to_tree(expr) # don't use namespace
assert from_tree(tree, {'t': t}).isidentical(expr)
def test_compute():
t = Symbol('t', 'var * {name: string, amount: int}')
expr = t.amount.sum()
query = {'expr': to_tree(expr)}
expected = 300
response = test.post('/compute/accounts.json',
                         data=json.dumps(query),
content_type='application/json')
assert 'OK' in response.status
assert json.loads(response.data)['data'] == expected
def test_compute_with_namespace():
query = {'expr': {'op': 'Field',
'args': ['accounts', 'name']}}
expected = ['Alice', 'Bob']
response = test.post('/compute/accounts.json',
                         data=json.dumps(query),
content_type='application/json')
assert 'OK' in response.status
assert json.loads(response.data)['data'] == expected
@pytest.fixture
def iris_server():
iris = CSV(example('iris.csv'))
server = Server(datasets={'iris': iris})
return server.app.test_client()
iris = CSV(example('iris.csv'))
def test_compute_with_variable_in_namespace(iris_server):
test = iris_server
t = Symbol('t', iris.dshape)
pl = Symbol('pl', 'float32')
expr = t[t.petal_length > pl].species
tree = to_tree(expr, {pl: 'pl'})
blob = json.dumps({'expr': tree, 'namespace': {'pl': 5}})
resp = test.post('/compute/iris.json', data=blob,
content_type='application/json')
assert 'OK' in resp.status
result = json.loads(resp.data)['data']
expected = list(compute(expr._subs({pl: 5}), {t: iris}))
assert result == expected
def test_compute_by_with_summary(iris_server):
test = iris_server
t = Symbol('t', iris.dshape)
expr = by(t.species, max=t.petal_length.max(), sum=t.petal_width.sum())
tree = to_tree(expr)
blob = json.dumps({'expr': tree})
resp = test.post('/compute/iris.json', data=blob,
content_type='application/json')
assert 'OK' in resp.status
result = json.loads(resp.data)['data']
expected = compute(expr, iris)
assert result == list(map(list, expected))
def test_compute_column_wise(iris_server):
test = iris_server
t = Symbol('t', iris.dshape)
subexpr = ((t.petal_width / 2 > 0.5) &
(t.petal_length / 2 > 0.5))
expr = t[subexpr]
tree = to_tree(expr)
blob = json.dumps({'expr': tree})
resp = test.post('/compute/iris.json', data=blob,
content_type='application/json')
assert 'OK' in resp.status
result = json.loads(resp.data)['data']
expected = compute(expr, iris)
assert list(map(tuple, result)) == list(map(tuple, expected))
def test_multi_expression_compute():
a = Symbol('accounts', discover(accounts))
c = Symbol('cities', discover(cities))
expr = join(a, c)
resp = test.post('/compute.json',
data=json.dumps({'expr': to_tree(expr)}),
content_type='application/json')
assert 'OK' in resp.status
result = json.loads(resp.data)['data']
expected = compute(expr, {a: accounts, c: cities})
    assert list(map(tuple, result)) == into(list, expected)
| {
"repo_name": "vitan/blaze",
"path": "blaze/server/tests/test_server.py",
"copies": "1",
"size": "5959",
"license": "bsd-3-clause",
"hash": -1296069222700162000,
"line_mean": 30.6968085106,
"line_max": 78,
"alpha_frac": 0.5474072831,
"autogenerated": false,
"ratio": 3.731371321227301,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9772773119228162,
"avg_score": 0.0012010970198278374,
"num_lines": 188
} |
from __future__ import absolute_import, division, print_function
import pytest
from ..misc import as_variable_name, file_format, DeferredMethod, nonpartial, lookup_class, as_list
def test_as_variable_name():
def check(input, expected):
assert as_variable_name(input) == expected
tests = [('x', 'x'),
('x2', 'x2'),
('2x', '_2x'),
('x!', 'x_'),
('x y z', 'x_y_z'),
('_XY', '_XY')
]
for input, expected in tests:
yield check, input, expected
class TestFileFormat(object):
def test_gz(self):
fmt = file_format('test.tar.gz')
assert fmt == 'tar'
def test_normal(self):
fmt = file_format('test.data')
assert fmt == 'data'
def test_underscores(self):
fmt = file_format('test_file.fits_file')
assert fmt == 'fits_file'
def test_multidot(self):
fmt = file_format('test.a.b.c')
assert fmt == 'c'
def test_nodot(self):
fmt = file_format('test')
assert fmt == ''
def test_deferred_method():
class Test(object):
def __init__(self):
self.a = 1
def change_a(self):
self.a = 2
t = Test()
Test.change_a = DeferredMethod(Test.change_a)
t.change_a()
assert t.a == 1
Test.change_a.execute_deferred_calls()
assert t.a == 2
def test_nonpartial():
def test(a=1, b=2):
pass
test_wrapped = nonpartial(test)
test_wrapped(a=1, b=2, c=3)
def test_lookup_class():
    assert lookup_class('glue.utils.misc.DeferredMethod') is DeferredMethod
with pytest.raises(ValueError) as exc:
lookup_class('gluh.utils.misc.DeferredMethod') is None
assert exc.value.args[0] == "Module 'gluh.utils.misc' not found"
with pytest.raises(ValueError) as exc:
lookup_class('glue.utils.misc.DeferredMethods') is None
assert exc.value.args[0] == "Object 'glue.utils.misc.DeferredMethods' not found"
def test_as_list():
    assert as_list(1) == [1]
    assert as_list([2, 3]) == [2, 3]
# TODO: add test for PropertySetMixin
| {
"repo_name": "saimn/glue",
"path": "glue/utils/tests/test_misc.py",
"copies": "1",
"size": "2111",
"license": "bsd-3-clause",
"hash": -3686561961446025700,
"line_mean": 20.9895833333,
"line_max": 99,
"alpha_frac": 0.5722406442,
"autogenerated": false,
"ratio": 3.3668261562998407,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9437933222068469,
"avg_score": 0.00022671568627450982,
"num_lines": 96
} |
from __future__ import absolute_import, division, print_function
import pytest
from odo.numpy_dtype import dshape_to_pandas, unit_to_dtype, dshape_to_numpy
from datashape import dshape
import numpy as np
@pytest.mark.parametrize(
['ds', 'expected'],
[
('decimal[9,2]', np.float64),
('decimal[9]', np.int32),
('?decimal[9]', np.float32),
('?decimal[1,0]', np.float16),
]
)
def test_decimal(ds, expected):
assert unit_to_dtype(dshape(ds)) == expected
@pytest.mark.parametrize(
['ds', 'field'],
[('var * {teststr1: option[string[4]]}', 'teststr1'),
('var * {teststr2: option[string["ascii"]]}', 'teststr2')]
)
def test_parameterized_option_instances(ds, field):
dtypes, _ = dshape_to_pandas(dshape(ds))
assert isinstance(dtypes[field], np.dtype)
@pytest.mark.parametrize(
'ds',
[
'option[datetime[tz="EST"]]',
'option[timedelta[unit="D"]]'
]
)
def test_unit_to_dtype(ds):
assert isinstance(unit_to_dtype(ds), np.dtype)
@pytest.mark.parametrize(
['ds', 'expected'],
[
('{a: int32}', ({'a': np.dtype('int32')}, [])),
('{a: int32, when: datetime}', ({'a': np.dtype('int32')}, ['when'])),
('{a: ?int64}', ({'a': np.dtype('float64')}, []))
]
)
def test_dshape_to_pandas(ds, expected):
assert dshape_to_pandas(ds) == expected
@pytest.mark.parametrize(
['ds', 'dt'],
[
('int32', 'int32'),
('?int32', 'float32'),
(
'{name: string[5, "ascii"], amount: ?int32}',
[('name', 'S5'), ('amount', '<f4')]
),
('(int32, float32)', [('f0', '<i4'), ('f1', '<f4')])
]
)
def test_dshape_to_numpy(ds, dt):
assert dshape_to_numpy(ds) == np.dtype(dt)
| {
"repo_name": "ContinuumIO/odo",
"path": "odo/tests/test_numpy_dtype.py",
"copies": "4",
"size": "1756",
"license": "bsd-3-clause",
"hash": -817289840533496600,
"line_mean": 24.4492753623,
"line_max": 77,
"alpha_frac": 0.5506833713,
"autogenerated": false,
"ratio": 3.0328151986183074,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 69
} |
from __future__ import absolute_import, division, print_function
import pytest
from qtpy import QtTest
from qtpy.QtCore import Qt
from glue.core import Data
from ..equation_editor import EquationEditorDialog
class TestEquationEditor:
def setup_method(self, method):
self.data = Data(x=[1, 2, 3], y=[3, 4, 5])
self.dialog = EquationEditorDialog(data=self.data, equation='')
def test_empty(self):
assert not self.dialog.ui.button_ok.isEnabled()
assert self.dialog.ui.label_status.text() == ''
@pytest.mark.parametrize('expression', ['1', '1 + {x}', '1 * np.sin({y}) + {x}'])
def test_valid_cases(self, expression):
self.dialog.expression.insertPlainText(expression)
assert self.dialog.ui.button_ok.isEnabled()
assert self.dialog.ui.label_status.text() == 'Valid expression'
self.dialog.ui.button_ok.click()
assert self.dialog._get_raw_command() == expression
def test_invalid_syntax(self):
self.dialog.expression.insertPlainText('1 + {x')
assert not self.dialog.ui.button_ok.isEnabled()
assert self.dialog.ui.label_status.text() == 'Incomplete or invalid syntax'
def test_unknown_component(self):
self.dialog.expression.insertPlainText('1 + {z}')
assert not self.dialog.ui.button_ok.isEnabled()
assert self.dialog.ui.label_status.text() == 'Invalid component: z'
def test_undefined_name(self):
self.dialog.expression.insertPlainText('1 + {x} + abc')
assert not self.dialog.ui.button_ok.isEnabled()
assert self.dialog.ui.label_status.text() == "name 'abc' is not defined"
def test_insert_component(self):
self.dialog.expression.insertPlainText('1 + ')
self.dialog.button_insert.click()
assert self.dialog.ui.label_status.text() == 'Valid expression'
self.dialog.ui.button_ok.click()
assert self.dialog._get_raw_command() == '1 + {Pixel Axis 0 [x]}'
def test_typing(self):
# This ensures that the code that highlights syntax gets called,
# and also ensures we can test undoing.
chars = (Qt.Key_1, Qt.Key_Space, Qt.Key_Plus, Qt.Key_Space,
Qt.Key_BraceLeft, Qt.Key_X, Qt.Key_BraceRight)
for char in chars:
QtTest.QTest.keyClick(self.dialog.expression, char)
assert self.dialog.expression.toPlainText() == '1 + {x}'
QtTest.QTest.keyClick(self.dialog.expression, Qt.Key_Z, Qt.ControlModifier)
assert self.dialog.expression.toPlainText() == '1 + {x'
for i in range(4):
QtTest.QTest.keyClick(self.dialog.expression, Qt.Key_Z, Qt.ControlModifier)
assert self.dialog.expression.toPlainText() == '1 '
def test_cancel(self):
self.dialog.expression.insertPlainText('1 + {x}')
assert self.dialog.ui.label_status.text() == 'Valid expression'
self.dialog.ui.button_cancel.click()
assert self.dialog.final_expression is None
| {
"repo_name": "stscieisenhamer/glue",
"path": "glue/dialogs/component_manager/qt/tests/test_equation_editor.py",
"copies": "1",
"size": "2997",
"license": "bsd-3-clause",
"hash": 5676039562706889000,
"line_mean": 37.4230769231,
"line_max": 87,
"alpha_frac": 0.6529863197,
"autogenerated": false,
"ratio": 3.517605633802817,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9666800495415493,
"avg_score": 0.000758291617464744,
"num_lines": 78
} |
from __future__ import absolute_import, division, print_function
import pytest
from symantecssl.datastructures import CaseInsensitiveDict
class TestCaseInsensitiveDict:
@pytest.mark.parametrize(("initial", "expected"), [
({"a": "b"}, {"a": "b"}),
(None, {}),
])
def test_initial_data(self, initial, expected):
assert CaseInsensitiveDict(initial) == expected
def test_set_item(self):
d = CaseInsensitiveDict()
d["A"] = "one"
d["a"] = "two"
d["b"] = "three"
assert d == {"a": "two", "b": "three"}
def test_get_item(self):
d = CaseInsensitiveDict({"a": "one"})
assert d["a"] == "one"
assert d["A"] == "one"
def test_del_item(self):
a = CaseInsensitiveDict({"a": "one"})
b = CaseInsensitiveDict({"B": "two"})
c = CaseInsensitiveDict({"c": "three"})
del a["A"]
del b["b"]
del c["c"]
assert "a" not in a
assert "A" not in a
assert "b" not in b
assert "B" not in b
assert "c" not in c
assert "C" not in c
def test_iter(self):
assert set(CaseInsensitiveDict({"a": "", "B": ""})) == set(["a", "B"])
def test_len(self):
assert len(CaseInsensitiveDict()) == 0
assert len(CaseInsensitiveDict({"a": None})) == 1
assert len(CaseInsensitiveDict({"a": None, "b": None})) == 2
def test_equality(self):
# Empty
assert CaseInsensitiveDict() == CaseInsensitiveDict()
assert CaseInsensitiveDict() == {}
assert {} == CaseInsensitiveDict()
# Same cased items
assert (
CaseInsensitiveDict({"a": "one", "b": "two"})
== CaseInsensitiveDict({"a": "one", "b": "two"})
)
assert (
CaseInsensitiveDict({"a": "one", "b": "two"})
== {"a": "one", "b": "two"}
)
assert (
{"a": "one", "b": "two"}
== CaseInsensitiveDict({"a": "one", "b": "two"})
)
# Differently cased items
assert (
CaseInsensitiveDict({"a": "one", "B": "two"})
== CaseInsensitiveDict({"A": "one", "b": "two"})
)
assert (
CaseInsensitiveDict({"a": "one", "B": "two"})
== {"A": "one", "b": "two"}
)
assert (
{"a": "one", "B": "two"}
== CaseInsensitiveDict({"A": "one", "b": "two"})
)
# Nonsense
assert CaseInsensitiveDict() != []
def test_copy(self):
a = CaseInsensitiveDict({"a": "one"})
b = a.copy()
a["b"] = "two"
b["b"] = "three"
assert a == {"a": "one", "b": "two"}
assert b == {"a": "one", "b": "three"}
def test_lower_items(self):
d = CaseInsensitiveDict({"A": "one", "b": "two"})
assert set(d.lower_items()) == set([("a", "one"), ("b", "two")])
def test_repr(self):
a = CaseInsensitiveDict({"A": "one"})
b = CaseInsensitiveDict({"b": "one"})
assert repr(a) == "CaseInsensitiveDict(%r)" % {"A": "one"}
assert repr(b) == "CaseInsensitiveDict(%r)" % {"b": "one"}
| {
"repo_name": "jmvrbanac/symantecssl",
"path": "tests/unit/test_datastructures.py",
"copies": "1",
"size": "3186",
"license": "apache-2.0",
"hash": 2433546475573119500,
"line_mean": 27.7027027027,
"line_max": 78,
"alpha_frac": 0.4786566227,
"autogenerated": false,
"ratio": 3.7570754716981134,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47357320943981135,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import pytest
from symantecssl.exceptions import SymantecError
from symantecssl.email import ResendEmail
def test_resend_email_response_success():
xml = b"""
<?xml version="1.0" encoding="UTF-8"?>
<ResendEmail>
<OrderResponseHeader>
<Timestamp>2014-05-29T19:22:45.749+0000</Timestamp>
<PartnerOrderID>1234</PartnerOrderID>
<SuccessCode>0</SuccessCode>
</OrderResponseHeader>
</ResendEmail>
""".strip()
assert ResendEmail().response(xml) is None
def test_resend_email_response_error():
xml = b"""
<?xml version="1.0" encoding="UTF-8"?>
<ResendEmail>
<OrderResponseHeader>
<Timestamp>2014-05-19T12:35:45.250+0000</Timestamp>
<Errors>
<Error>
<ErrorMessage>An Error Message!!</ErrorMessage>
</Error>
</Errors>
<PartnerOrderID>1234</PartnerOrderID>
<SuccessCode>-1</SuccessCode>
</OrderResponseHeader>
</ResendEmail>
""".strip()
with pytest.raises(SymantecError) as exc_info:
ResendEmail().response(xml).response(xml)
assert exc_info.value.args == (
"The Symantec API call ResendEmail returned an error: "
"'An Error Message!!'",
)
assert exc_info.value.errors == [{"ErrorMessage": "An Error Message!!"}]
| {
"repo_name": "jmvrbanac/symantecssl",
"path": "tests/unit/test_email.py",
"copies": "1",
"size": "1439",
"license": "apache-2.0",
"hash": -2150734513724345300,
"line_mean": 28.9791666667,
"line_max": 76,
"alpha_frac": 0.6108408617,
"autogenerated": false,
"ratio": 3.7183462532299743,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9829187114929974,
"avg_score": 0,
"num_lines": 48
} |
from __future__ import absolute_import, division, print_function
import pytest
from symantecssl.exceptions import SymantecError
from symantecssl.order import(
Order, GetOrderByPartnerOrderID, GetOrdersByDateRange,
GetModifiedOrders, ModifyOrder, ChangeApproverEmail, Reissue, Revoke,
GetQuickApproverList, ValidateOrderParameters
)
def test_order_response_success():
xml = b"""
<?xml version="1.0" encoding="UTF-8"?>
<QuickOrder>
<OrderResponseHeader>
<PartnerOrderID>1234</PartnerOrderID>
<SuccessCode>0</SuccessCode>
</OrderResponseHeader>
<GeoTrustOrderID>abcdefg</GeoTrustOrderID>
</QuickOrder>
""".strip()
assert Order().response(xml) == {
"PartnerOrderID": "1234",
"GeoTrustOrderID": "abcdefg",
}
def test_order_response_failure():
xml = b"""
<?xml version="1.0" encoding="UTF-8"?>
<QuickOrder>
<OrderResponseHeader>
<Errors>
<Error>
<ErrorMessage>An Error!</ErrorMessage>
</Error>
</Errors>
<SuccessCode>1</SuccessCode>
</OrderResponseHeader>
</QuickOrder>
""".strip()
with pytest.raises(SymantecError) as exc_info:
Order().response(xml)
assert exc_info.value.args == (
"The Symantec API call Order returned an error: 'An Error!'",
)
assert exc_info.value.errors == [{"ErrorMessage": "An Error!"}]
def test_get_order_by_partner_order_id_response_success():
xml = b"""
<?xml version="1.0" encoding="UTF-8"?>
<GetOrderByPartnerOrderID>
<QueryResponseHeader>
<Timestamp>2014-05-29T17:36:43.318+0000</Timestamp>
<SuccessCode>0</SuccessCode>
<ReturnCount>1</ReturnCount>
</QueryResponseHeader>
<OrderDetail>
<OrderInfo>
<Method>RESELLER</Method>
<DomainName>testingsymantecssl.com</DomainName>
<ProductCode>SSL123</ProductCode>
<PartnerOrderID>1234</PartnerOrderID>
<ServerCount>1</ServerCount>
<ValidityPeriod>12</ValidityPeriod>
<OrderStatusMajor>PENDING</OrderStatusMajor>
<OrderState>WF_DOMAIN_APPROVAL</OrderState>
<OrderDate>2014-05-29T17:36:39.000+0000</OrderDate>
<RenewalInd>N</RenewalInd>
<Price>35</Price>
<GeoTrustOrderID>1806482</GeoTrustOrderID>
</OrderInfo>
<CertificateInfo>
<CertificateStatus>Good!</CertificateStatus>
<StartDate>Today</StartDate>
</CertificateInfo>
<OrderContacts>
<AdminContact>
<FirstName>John</FirstName>
<LastName>Doe</LastName>
</AdminContact>
</OrderContacts>
</OrderDetail>
</GetOrderByPartnerOrderID>
""".strip()
assert GetOrderByPartnerOrderID().response(xml) == {
"OrderInfo": {
"OrderStatusMajor": "PENDING",
"GeoTrustOrderID": "1806482",
"DomainName": "testingsymantecssl.com",
"ProductCode": "SSL123",
"ValidityPeriod": "12",
"OrderDate": "2014-05-29T17:36:39.000+0000",
"Price": "35",
"RenewalInd": "N",
"Method": "RESELLER",
"PartnerOrderID": "1234",
"OrderState": "WF_DOMAIN_APPROVAL",
"ServerCount": "1",
},
"CertificateInfo": {
"CertificateStatus": "Good!",
"StartDate": "Today",
},
"OrderContacts": {
"AdminContact": {
"FirstName": "John",
"LastName": "Doe",
},
},
}
def test_get_order_by_partner_order_id_response_failure():
xml = b"""
<?xml version="1.0" encoding="UTF-8"?>
<GetOrderByPartnerOrderID>
<QueryResponseHeader>
<Timestamp>2014-05-29T17:49:18.880+0000</Timestamp>
<Errors>
<Error>
<ErrorMessage>An Error Message!</ErrorMessage>
</Error>
</Errors>
<SuccessCode>-1</SuccessCode>
<ReturnCount>0</ReturnCount>
</QueryResponseHeader>
<OrderDetail/>
</GetOrderByPartnerOrderID>
""".strip()
with pytest.raises(SymantecError) as exc_info:
GetOrderByPartnerOrderID().response(xml).response(xml)
assert exc_info.value.args == (
"The Symantec API call GetOrderByPartnerOrderID returned an error: "
"'An Error Message!'",
)
assert exc_info.value.errors == [{"ErrorMessage": "An Error Message!"}]
def test_get_orders_by_date_range_success():
xml = b"""
<?xml version="1.0" encoding="UTF-8"?>
<GetOrderByPartnerOrderID>
<QueryResponseHeader>
<Timestamp>2014-05-29T17:36:43.318+0000</Timestamp>
<SuccessCode>0</SuccessCode>
<ReturnCount>1</ReturnCount>
</QueryResponseHeader>
<OrderDetails>
<OrderDetail>
<OrderInfo>
<Method>RESELLER</Method>
<DomainName>testingsymantecssl.com</DomainName>
<ProductCode>SSL123</ProductCode>
<PartnerOrderID>1234</PartnerOrderID>
<ServerCount>1</ServerCount>
<ValidityPeriod>12</ValidityPeriod>
<OrderStatusMajor>PENDING</OrderStatusMajor>
<OrderState>WF_DOMAIN_APPROVAL</OrderState>
<OrderDate>2014-05-29T17:36:39.000+0000</OrderDate>
<RenewalInd>N</RenewalInd>
<Price>35</Price>
<GeoTrustOrderID>1806482</GeoTrustOrderID>
</OrderInfo>
</OrderDetail>
<OrderDetail>
<OrderInfo>
<Method>RESELLER</Method>
<DomainName>testingsymantecssl.com</DomainName>
<ProductCode>SSL123</ProductCode>
<PartnerOrderID>1234</PartnerOrderID>
<ServerCount>1</ServerCount>
<ValidityPeriod>12</ValidityPeriod>
<OrderStatusMajor>PENDING</OrderStatusMajor>
<OrderState>WF_DOMAIN_APPROVAL</OrderState>
<OrderDate>2014-05-29T17:36:39.000+0000</OrderDate>
<RenewalInd>N</RenewalInd>
<Price>35</Price>
<GeoTrustOrderID>1806485</GeoTrustOrderID>
</OrderInfo>
</OrderDetail>
</OrderDetails>
</GetOrderByPartnerOrderID>
""".strip()
response = GetOrdersByDateRange().response(xml)
assert type(response) is list
assert response[0] == {
"OrderStatusMajor": "PENDING",
"GeoTrustOrderID": "1806482",
"DomainName": "testingsymantecssl.com",
"ProductCode": "SSL123",
"ValidityPeriod": "12",
"OrderDate": "2014-05-29T17:36:39.000+0000",
"Price": "35",
"RenewalInd": "N",
"Method": "RESELLER",
"PartnerOrderID": "1234",
"OrderState": "WF_DOMAIN_APPROVAL",
"ServerCount": "1",
}
assert response[1] == {
"OrderStatusMajor": "PENDING",
"GeoTrustOrderID": "1806485",
"DomainName": "testingsymantecssl.com",
"ProductCode": "SSL123",
"ValidityPeriod": "12",
"OrderDate": "2014-05-29T17:36:39.000+0000",
"Price": "35",
"RenewalInd": "N",
"Method": "RESELLER",
"PartnerOrderID": "1234",
"OrderState": "WF_DOMAIN_APPROVAL",
"ServerCount": "1",
}
def test_get_orders_by_date_range_response_failure():
xml = b"""
<?xml version="1.0" encoding="UTF-8"?>
<GetOrdersByDateRange>
<QueryResponseHeader>
<Timestamp>2014-05-29T17:49:18.880+0000</Timestamp>
<Errors>
<Error>
<ErrorMessage>An Error Message!</ErrorMessage>
</Error>
</Errors>
<SuccessCode>-1</SuccessCode>
<ReturnCount>0</ReturnCount>
</QueryResponseHeader>
<OrderDetail/>
</GetOrdersByDateRange>
""".strip()
with pytest.raises(SymantecError) as exc_info:
GetOrdersByDateRange().response(xml).response(xml)
assert exc_info.value.args == (
"The Symantec API call GetOrdersByDateRange returned an error: "
"'An Error Message!'",
)
assert exc_info.value.errors == [{"ErrorMessage": "An Error Message!"}]
def test_get_modified_orders_success():
xml = b"""
<?xml version="1.0" encoding="UTF-8"?>
<GetModifiedOrders>
<QueryResponseHeader>
<Timestamp>2014-05-29T17:36:43.318+0000</Timestamp>
<SuccessCode>0</SuccessCode>
<ReturnCount>1</ReturnCount>
</QueryResponseHeader>
<OrderDetails>
<OrderDetail>
<ModificationEvents>
<ModificationEvent>
<ModificationEventID>20919342</ModificationEventID>
<ModificationEventName>Order Created</ModificationEventName>
<ModificationTimestamp>2014-06-12</ModificationTimestamp>
</ModificationEvent>
</ModificationEvents>
<OrderInfo>
<Method>RESELLER</Method>
<DomainName>testingsymantecssl.com</DomainName>
<ProductCode>SSL123</ProductCode>
<PartnerOrderID>1234</PartnerOrderID>
<ServerCount>1</ServerCount>
<ValidityPeriod>12</ValidityPeriod>
<OrderStatusMajor>PENDING</OrderStatusMajor>
<OrderState>WF_DOMAIN_APPROVAL</OrderState>
<OrderDate>2014-05-29T17:36:39.000+0000</OrderDate>
<RenewalInd>N</RenewalInd>
<Price>35</Price>
<GeoTrustOrderID>1806482</GeoTrustOrderID>
</OrderInfo>
</OrderDetail>
<OrderDetail>
<ModificationEvents>
<ModificationEvent>
<ModificationEventID>20919340</ModificationEventID>
<ModificationEventName>Order Created</ModificationEventName>
<ModificationTimestamp>2014-06-12</ModificationTimestamp>
</ModificationEvent>
</ModificationEvents>
<OrderInfo>
<Method>RESELLER</Method>
<DomainName>testingsymantecssl.com</DomainName>
<ProductCode>SSL123</ProductCode>
<PartnerOrderID>1234</PartnerOrderID>
<ServerCount>1</ServerCount>
<ValidityPeriod>12</ValidityPeriod>
<OrderStatusMajor>PENDING</OrderStatusMajor>
<OrderState>WF_DOMAIN_APPROVAL</OrderState>
<OrderDate>2014-05-29T17:36:39.000+0000</OrderDate>
<RenewalInd>N</RenewalInd>
<Price>35</Price>
<GeoTrustOrderID>1806485</GeoTrustOrderID>
</OrderInfo>
</OrderDetail>
</OrderDetails>
</GetModifiedOrders>
""".strip()
response = GetModifiedOrders().response(xml)
assert type(response) is list
assert response[0]["OrderInfo"] == {
"OrderStatusMajor": "PENDING",
"GeoTrustOrderID": "1806482",
"DomainName": "testingsymantecssl.com",
"ProductCode": "SSL123",
"ValidityPeriod": "12",
"OrderDate": "2014-05-29T17:36:39.000+0000",
"Price": "35",
"RenewalInd": "N",
"Method": "RESELLER",
"PartnerOrderID": "1234",
"OrderState": "WF_DOMAIN_APPROVAL",
"ServerCount": "1",
}
assert response[0]["ModificationEvents"] == [{
"ModificationEventID": "20919342",
"ModificationEventName": "Order Created",
"ModificationTimestamp": "2014-06-12"
}]
assert response[1]["OrderInfo"] == {
"OrderStatusMajor": "PENDING",
"GeoTrustOrderID": "1806485",
"DomainName": "testingsymantecssl.com",
"ProductCode": "SSL123",
"ValidityPeriod": "12",
"OrderDate": "2014-05-29T17:36:39.000+0000",
"Price": "35",
"RenewalInd": "N",
"Method": "RESELLER",
"PartnerOrderID": "1234",
"OrderState": "WF_DOMAIN_APPROVAL",
"ServerCount": "1",
}
assert response[1]["ModificationEvents"] == [{
"ModificationEventID": "20919340",
"ModificationEventName": "Order Created",
"ModificationTimestamp": "2014-06-12"
}]
def test_get_modified_orders_response_failure():
xml = b"""
<?xml version="1.0" encoding="UTF-8"?>
<GetModifiedOrders>
<QueryResponseHeader>
<Timestamp>2014-05-29T17:49:18.880+0000</Timestamp>
<Errors>
<Error>
<ErrorMessage>An Error Message!</ErrorMessage>
</Error>
</Errors>
<SuccessCode>-1</SuccessCode>
<ReturnCount>0</ReturnCount>
</QueryResponseHeader>
<OrderDetail/>
</GetModifiedOrders>
""".strip()
with pytest.raises(SymantecError) as exc_info:
GetModifiedOrders().response(xml).response(xml)
assert exc_info.value.args == (
"The Symantec API call GetModifiedOrders returned an error: "
"'An Error Message!'",
)
assert exc_info.value.errors == [{"ErrorMessage": "An Error Message!"}]
def test_change_approver_email_response_success():
xml = b"""
<?xml version="1.0" encoding="UTF-8"?>
<ChangeApproverEmail>
<OrderResponseHeader>
<Timestamp>2014-05-19T12:38:01.835+0000</Timestamp>
<PartnerOrderID>OxJL7QuR2gyX7LiQHJun0</PartnerOrderID>
<SuccessCode>0</SuccessCode>
</OrderResponseHeader>
</ChangeApproverEmail>
""".strip()
assert ChangeApproverEmail().response(xml) is None
def test_change_approver_email_response_error():
xml = b"""
<?xml version="1.0" encoding="UTF-8"?>
<ChangeApproverEmail>
<OrderResponseHeader>
<Timestamp>2014-05-19T12:35:45.250+0000</Timestamp>
<Errors>
<Error>
<ErrorMessage>An Error Message!!</ErrorMessage>
</Error>
</Errors>
<PartnerOrderID>1234</PartnerOrderID>
<SuccessCode>-1</SuccessCode>
</OrderResponseHeader>
</ChangeApproverEmail>
""".strip()
with pytest.raises(SymantecError) as exc_info:
ChangeApproverEmail().response(xml).response(xml)
assert exc_info.value.args == (
"The Symantec API call ChangeApproverEmail returned an error: "
"'An Error Message!!'",
)
assert exc_info.value.errors == [{"ErrorMessage": "An Error Message!!"}]
def test_reissue_response_success():
xml = b"""
<?xml version="1.0" encoding="UTF-8"?>
<Reissue>
<OrderResponseHeader>
<Timestamp>2014-06-16T19:24:26.053+0000</Timestamp>
<PartnerOrderID>1234</PartnerOrderID>
<SuccessCode>0</SuccessCode>
</OrderResponseHeader>
<GeoTrustOrderID>abcdefg</GeoTrustOrderID>
</Reissue>
""".strip()
assert Reissue().response(xml) == {
"PartnerOrderID": "1234",
"GeoTrustOrderID": "abcdefg",
}
def test_reissue_response_error():
xml = b"""
<?xml version="1.0" encoding="UTF-8"?>
<Reissue>
<OrderResponseHeader>
<Timestamp>2014-05-19T12:35:45.250+0000</Timestamp>
<Errors>
<Error>
<ErrorMessage>An Error Message!!</ErrorMessage>
</Error>
</Errors>
<PartnerOrderID>1234</PartnerOrderID>
<SuccessCode>-1</SuccessCode>
</OrderResponseHeader>
</Reissue>
""".strip()
with pytest.raises(SymantecError) as exc_info:
Reissue().response(xml).response(xml)
assert exc_info.value.args == (
"The Symantec API call Reissue returned an error: "
"'An Error Message!!'",
)
assert exc_info.value.errors == [{"ErrorMessage": "An Error Message!!"}]
def test_revoke_response_success():
xml = b"""
<?xml version="1.0" encoding="UTF-8"?>
<Revoke>
<OrderResponseHeader>
<PartnerOrderID>1234</PartnerOrderID>
<SuccessCode>0</SuccessCode>
</OrderResponseHeader>
<GeoTrustOrderID>abcdefg</GeoTrustOrderID>
<SerialNumber>11111</SerialNumber>
</Revoke>
""".strip()
assert Revoke().response(xml) == {
"PartnerOrderID": "1234",
"GeoTrustOrderID": "abcdefg",
"SerialNumber": "11111",
}
def test_revoke_response_failure():
xml = b"""
<?xml version="1.0" encoding="UTF-8"?>
<Revoke>
<OrderResponseHeader>
<Errors>
<Error>
<ErrorMessage>An Error!</ErrorMessage>
</Error>
</Errors>
<SuccessCode>1</SuccessCode>
</OrderResponseHeader>
</Revoke>
""".strip()
with pytest.raises(SymantecError) as exc_info:
Revoke().response(xml)
assert exc_info.value.args == (
"The Symantec API call Revoke returned an error: 'An Error!'",
)
assert exc_info.value.errors == [{"ErrorMessage": "An Error!"}]
def test_modify_order_response_success():
xml = b"""
<?xml version="1.0" encoding="UTF-8"?>
<ModifyOrder>
<OrderResponseHeader>
<Timestamp>2014-05-19T12:38:01.835+0000</Timestamp>
<PartnerOrderID>OxJL7QuR2gyX7LiQHJun0</PartnerOrderID>
<SuccessCode>0</SuccessCode>
</OrderResponseHeader>
</ModifyOrder>
""".strip()
assert ModifyOrder().response(xml) is None
def test_modify_order_response_error():
xml = b"""
<?xml version="1.0" encoding="UTF-8"?>
<ModifyOrder>
<OrderResponseHeader>
<Timestamp>2014-05-19T12:35:45.250+0000</Timestamp>
<Errors>
<Error>
<ErrorMessage>An Error Message!!</ErrorMessage>
</Error>
</Errors>
<PartnerOrderID>1234</PartnerOrderID>
<SuccessCode>-1</SuccessCode>
</OrderResponseHeader>
</ModifyOrder>
""".strip()
with pytest.raises(SymantecError) as exc_info:
ModifyOrder().response(xml).response(xml)
assert exc_info.value.args == (
"The Symantec API call ModifyOrder returned an error: "
"'An Error Message!!'",
)
assert exc_info.value.errors == [{"ErrorMessage": "An Error Message!!"}]
def test_validate_order_parameters_success():
xml = b"""
<?xml version="1.0" encoding="UTF-8"?>
<ValidateOrderParameters>
<ValidityPeriod>12</ValidityPeriod>
<Price>$35 USD</Price>
<OrderResponseHeader>
<Timestamp>2014-06-16T16:55:58.611+0000</Timestamp>
<SuccessCode>0</SuccessCode>
</OrderResponseHeader>
<ParsedCSR>
<State>Texas</State>
<Country>US</Country>
<DomainName>testingsymantecssl.com</DomainName>
<EncryptionAlgorithm>RSA</EncryptionAlgorithm>
<Locality>San Antonio</Locality>
<Organization>Test</Organization>
<Email/>
<HashAlgorithm>SHA1</HashAlgorithm>
<OrganizationUnit/>
<IsValidTrueDomainName>true</IsValidTrueDomainName>
<IsValidQuickDomainName>true</IsValidQuickDomainName>
<HasBadExtensions>false</HasBadExtensions>
</ParsedCSR>
<CertificateSignatureHashAlgorithm>SHA1</CertificateSignatureHashAlgorithm>
</ValidateOrderParameters>
""".strip()
assert ValidateOrderParameters().response(xml) == {
"ValidityPeriod": "12",
"Price": "$35 USD",
"ParsedCSR": {
"State": "Texas",
"Country": "US",
"DomainName": "testingsymantecssl.com",
"EncryptionAlgorithm": "RSA",
"Locality": "San Antonio",
"Organization": "Test",
"Email": None,
"HashAlgorithm": "SHA1",
"OrganizationUnit": None,
"IsValidTrueDomainName": "true",
"IsValidQuickDomainName": "true",
"HasBadExtensions": "false",
},
"CertificateSignatureHashAlgorithm": "SHA1",
}
def test_validate_order_parameters_error():
xml = b"""
<?xml version="1.0" encoding="UTF-8"?>
<ValidateOrderParameters>
<ValidityPeriod>0</ValidityPeriod>
<OrderResponseHeader>
<Timestamp>2014-06-16T16:54:34.260+0000</Timestamp>
<Errors>
<Error>
<ErrorCode>-2019</ErrorCode>
<ErrorField>ValidityPeriod</ErrorField>
<ErrorMessage>Validity period not valid</ErrorMessage>
</Error>
</Errors>
<SuccessCode>-1</SuccessCode>
</OrderResponseHeader>
<ParsedCSR>
<State>Texas</State>
<Country>US</Country>
<DomainName>testingsymantecssl.com</DomainName>
<EncryptionAlgorithm>RSA</EncryptionAlgorithm>
<Locality>San Antonio</Locality>
<Organization>Test</Organization>
<Email/>
<HashAlgorithm>SHA1</HashAlgorithm>
<OrganizationUnit/>
<IsValidTrueDomainName>true</IsValidTrueDomainName>
<IsValidQuickDomainName>true</IsValidQuickDomainName>
<HasBadExtensions>false</HasBadExtensions>
</ParsedCSR>
<CertificateSignatureHashAlgorithm>SHA1</CertificateSignatureHashAlgorithm>
</ValidateOrderParameters>
""".strip()
with pytest.raises(SymantecError) as exc_info:
        ValidateOrderParameters().response(xml)
assert exc_info.value.args == (
"The Symantec API call ValidateOrderParameters returned an error: "
"'Validity period not valid'",
)
assert exc_info.value.errors == [{
"ErrorCode": "-2019",
"ErrorField": "ValidityPeriod",
"ErrorMessage": "Validity period not valid",
}]
def test_get_quick_approver_list_success():
xml = b"""
<?xml version="1.0" encoding="UTF-8"?>
<GetQuickApproverList>
<QueryResponseHeader>
<Timestamp>2014-05-29T17:36:43.318+0000</Timestamp>
<SuccessCode>0</SuccessCode>
<ReturnCount>1</ReturnCount>
</QueryResponseHeader>
<ApproverList>
<Approver>
<ApproverEmail>admin@testingsymantecssl.com</ApproverEmail>
<ApproverType>Generic</ApproverType>
</Approver>
<Approver>
<ApproverEmail>support_preprod@geotrust.com</ApproverEmail>
<ApproverType>Manual</ApproverType>
</Approver>
</ApproverList>
</GetQuickApproverList>
""".strip()
assert GetQuickApproverList().response(xml) == [
{
"ApproverEmail": "admin@testingsymantecssl.com",
"ApproverType": "Generic",
},
{
"ApproverEmail": "support_preprod@geotrust.com",
"ApproverType": "Manual",
},
]
def test_get_quick_approver_list_error():
xml = b"""
<?xml version="1.0" encoding="UTF-8"?>
<GetQuickApproverList>
<QueryResponseHeader>
<Timestamp>2014-05-29T17:49:18.880+0000</Timestamp>
<Errors>
<Error>
<ErrorMessage>An Error Message!!</ErrorMessage>
</Error>
</Errors>
<SuccessCode>-1</SuccessCode>
<ReturnCount>0</ReturnCount>
</QueryResponseHeader>
<ApproverList/>
</GetQuickApproverList>
""".strip()
with pytest.raises(SymantecError) as exc_info:
        GetQuickApproverList().response(xml)
assert exc_info.value.args == (
"The Symantec API call GetQuickApproverList returned an error: "
"'An Error Message!!'",
)
assert exc_info.value.errors == [{"ErrorMessage": "An Error Message!!"}]
| {
"repo_name": "jmvrbanac/symantecssl",
"path": "tests/unit/test_order.py",
"copies": "1",
"size": "24086",
"license": "apache-2.0",
"hash": 3552062922617811500,
"line_mean": 32.6867132867,
"line_max": 83,
"alpha_frac": 0.5758116748,
"autogenerated": false,
"ratio": 3.82681919288211,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49026308676821095,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import pytest
from tlsenum.parse_hello import (
ClientHello, Extensions, HandshakeFailure, ServerHello
)
class TestClientHello(object):
@pytest.mark.parametrize("version_string,protocol_minor", [
("3.0", 0), ("1.0", 1), ("1.1", 2), ("1.2", 3)
])
def test_protocol_version(self, version_string, protocol_minor):
msg = ClientHello()
msg.protocol_version = version_string
assert msg._protocol_minor == protocol_minor
assert msg.protocol_version == version_string
@pytest.mark.parametrize("deflate,result", [
(True, [1, 0]), (False, [0])
])
def test_compression_method(self, deflate, result):
msg = ClientHello()
msg.deflate = deflate
assert msg._compression_method == result
assert msg.deflate is deflate
def test_cipher_suites(self):
msg = ClientHello()
msg.cipher_suites = ["TLS_NULL_WITH_NULL_NULL"]
assert msg.cipher_suites == ["TLS_NULL_WITH_NULL_NULL"]
def test_get_bytes_from_cipher_suites(self):
msg = ClientHello()
assert msg._get_bytes_from_cipher_suites(
["TLS_NULL_WITH_NULL_NULL", "TLS_RSA_WITH_NULL_MD5"]
) == [0, 1]
def test_extensions(self):
msg = ClientHello()
msg.extensions = b"mock"
assert msg.extensions == b"mock"
def test_build(self, monkeypatch):
def mock_urandom(len):
return (
b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
)
monkeypatch.setattr("time.time", lambda: 0)
monkeypatch.setattr("os.urandom", mock_urandom)
msg = ClientHello()
msg.protocol_version = "1.2"
msg.deflate = False
msg.cipher_suites = ["TLS_RSA_WITH_NULL_MD5"]
msg.extensions = b""
assert msg.build() == (
b"\x16"
b"\x03\x03"
b"\x00\x2F"
b"\x01"
b"\x00\x00\x2B"
b"\x03\x03"
b"\x00\x00\x00\x00"
b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
b"\x00"
b"\x00\x02\x00\x01"
b"\x01\x00"
b"\x00\x00"
)
class TestExtensions(object):
def test_ec_point_format(self):
extension = Extensions()
extension.ec_point_format = [
"ansiX962_compressed_prime",
"uncompressed",
"ansiX962_compressed_char2"
]
assert extension.ec_point_format == [
"ansiX962_compressed_prime",
"uncompressed",
"ansiX962_compressed_char2"
]
assert extension.build() == b"\x00\x0B\x00\x04\x03\x01\x00\x02"
def test_get_bytes_from_ec_point_format(self):
extension = Extensions()
assert extension._get_bytes_from_ec_point_format([
"ansiX962_compressed_prime",
"uncompressed",
"ansiX962_compressed_char2"
]) == [1, 0, 2]
def test_ec_curves(self):
extension = Extensions()
extension.ec_curves = ["sect163k1", "sect163r1", "sect163r2"]
assert extension.ec_curves == ["sect163k1", "sect163r1", "sect163r2"]
assert extension.build() == (
b"\x00\x0A\x00\x08\x00\x06\x00\x01\x00\x02\x00\x03"
)
def test_get_bytes_from_ec_curves(self):
extension = Extensions()
assert extension._get_bytes_from_ec_curves([
"sect163k1", "sect163r1", "sect163r2"
]) == [1, 2, 3]
def test_sni_extension(self):
extension = Extensions()
extension.sni = "ayrx.me"
assert extension.sni == "ayrx.me"
assert extension.build() == (
b"\x00\x00\x00\x0C\x00\x0A\x00\x00\x07\x61\x79\x72\x78\x2E\x6D\x65"
)
class TestServerHello(object):
def test_parse_server_hello(self):
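        # Both captures below are ServerHello records negotiating TLS 1.2 and cipher
        # suite 0xC02F (TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256); they differ only in
        # the final compression-method byte (0x00 = none, 0x01 = DEFLATE).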
deflate_no = (
b"\x16\x03\x03\x00\x2A\x02\x00\x00\x26\x03\x03\xB5\xA4\x22\x01\x18"
b"\xC5\x71\x41\x97\x6D\xC7\x06\x14\xC0\xE5\x78\x7A\xF3\x1D\x4E\x56"
b"\x98\xCC\x7A\x37\xAE\x6F\x1D\xC6\xF0\x78\x68\x00\xC0\x2F\x00"
)
server_hello = ServerHello.parse_server_hello(deflate_no)
assert server_hello.protocol_version == "1.2"
assert server_hello.deflate is False
assert server_hello.cipher_suite == (
"TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256"
)
deflate_yes = (
b"\x16\x03\x03\x00\x2A\x02\x00\x00\x26\x03\x03\xB5\xA4\x22\x01\x18"
b"\xC5\x71\x41\x97\x6D\xC7\x06\x14\xC0\xE5\x78\x7A\xF3\x1D\x4E\x56"
b"\x98\xCC\x7A\x37\xAE\x6F\x1D\xC6\xF0\x78\x68\x00\xC0\x2F\x01"
)
server_hello = ServerHello.parse_server_hello(deflate_yes)
assert server_hello.protocol_version == "1.2"
assert server_hello.deflate is True
assert server_hello.cipher_suite == (
"TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256"
)
def test_parse_alert(self):
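        # 0x15 marks a TLS alert record; the last byte is the alert description.
        # 40 (0x28) is handshake_failure, which maps to the HandshakeFailure
        # exception, while 0 (close_notify) is not a usable ServerHello at all and is
        # expected to raise ValueError.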
handshake_failure_alert_msg = b"\x15\x03\x03\x00\x02\x02\x28"
with pytest.raises(HandshakeFailure):
ServerHello.parse_server_hello(handshake_failure_alert_msg)
close_notify_alert_msg = b"\x15\x03\x03\x00\x02\x02\x00"
with pytest.raises(ValueError):
ServerHello.parse_server_hello(close_notify_alert_msg)
| {
"repo_name": "Ayrx/tlsenum",
"path": "tests/test_parse_hello.py",
"copies": "1",
"size": "5543",
"license": "mit",
"hash": -8522006991290384000,
"line_mean": 33.4285714286,
"line_max": 79,
"alpha_frac": 0.5800108245,
"autogenerated": false,
"ratio": 2.888483585200625,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.39684944097006253,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import pytest
import itertools
import numpy as np
import pandas as pd
from datetime import datetime, date
from blaze.compute.core import compute, compute_up
from blaze.expr import symbol, by, exp, summary, Broadcast, join, concat
from blaze.expr import greatest, least, coalesce
from blaze import sin
import blaze
from odo import into
from datashape import discover, to_numpy, dshape
x = np.array([(1, 'Alice', 100),
(2, 'Bob', -200),
(3, 'Charlie', 300),
(4, 'Denis', 400),
(5, 'Edith', -500)],
dtype=[('id', 'i8'), ('name', 'S7'), ('amount', 'i8')])
t = symbol('t', discover(x))
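# ``x`` is the structured-array fixture most of these tests compute against, and ``t``
# is the blaze symbol whose datashape is discovered from it.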
def eq(a, b):
c = a == b
if isinstance(c, np.ndarray):
return c.all()
return c
def test_symbol():
assert eq(compute(t, x), x)
def test_eq():
assert eq(compute(t['amount'] == 100, x),
x['amount'] == 100)
def test_selection():
    assert eq(compute(t[t['amount'] == 100], x), x[x['amount'] == 100])
assert eq(compute(t[t['amount'] < 0], x), x[x['amount'] < 0])
def test_arithmetic():
assert eq(compute(t['amount'] + t['id'], x),
x['amount'] + x['id'])
assert eq(compute(t['amount'] * t['id'], x),
x['amount'] * x['id'])
assert eq(compute(t['amount'] % t['id'], x),
x['amount'] % x['id'])
def test_UnaryOp():
assert eq(compute(exp(t['amount']), x),
np.exp(x['amount']))
assert eq(compute(abs(-t['amount']), x),
abs(-x['amount']))
def test_Neg():
assert eq(compute(-t['amount'], x),
-x['amount'])
def test_invert_not():
assert eq(compute(~(t.amount > 0), x),
~(x['amount'] > 0))
def test_Reductions():
assert compute(t['amount'].mean(), x) == x['amount'].mean()
assert compute(t['amount'].count(), x) == len(x['amount'])
assert compute(t['amount'].sum(), x) == x['amount'].sum()
assert compute(t['amount'].min(), x) == x['amount'].min()
assert compute(t['amount'].max(), x) == x['amount'].max()
assert compute(t['amount'].nunique(), x) == len(np.unique(x['amount']))
assert compute(t['amount'].var(), x) == x['amount'].var()
assert compute(t['amount'].std(), x) == x['amount'].std()
assert compute(t['amount'].var(unbiased=True), x) == x['amount'].var(ddof=1)
assert compute(t['amount'].std(unbiased=True), x) == x['amount'].std(ddof=1)
assert compute((t['amount'] > 150).any(), x) == True
assert compute((t['amount'] > 250).all(), x) == False
assert compute(t['amount'][0], x) == x['amount'][0]
assert compute(t['amount'][-1], x) == x['amount'][-1]
def test_count_string():
s = symbol('name', 'var * ?string')
x = np.array(['Alice', np.nan, 'Bob', 'Denis', 'Edith'], dtype='object')
assert compute(s.count(), x) == 4
def test_reductions_on_recarray():
assert compute(t.count(), x) == len(x)
def test_count_nan():
t = symbol('t', '3 * ?real')
x = np.array([1.0, np.nan, 2.0])
assert compute(t.count(), x) == 2
def test_distinct():
x = np.array([('Alice', 100),
('Alice', -200),
('Bob', 100),
('Bob', 100)],
dtype=[('name', 'S5'), ('amount', 'i8')])
t = symbol('t', 'var * {name: string, amount: int64}')
assert eq(compute(t['name'].distinct(), x),
np.unique(x['name']))
assert eq(compute(t.distinct(), x),
np.unique(x))
def test_distinct_on_recarray():
rec = pd.DataFrame(
[[0, 1],
[0, 2],
[1, 1],
[1, 2]],
columns=('a', 'b'),
).to_records(index=False)
s = symbol('s', discover(rec))
assert (
compute(s.distinct('a'), rec) ==
pd.DataFrame(
[[0, 1],
[1, 1]],
columns=('a', 'b'),
).to_records(index=False)
).all()
def test_distinct_on_structured_array():
arr = np.array(
[(0., 1.),
(0., 2.),
(1., 1.),
(1., 2.)],
dtype=[('a', 'f4'), ('b', 'f4')],
)
s = symbol('s', discover(arr))
assert(
compute(s.distinct('a'), arr) ==
np.array([(0., 1.), (1., 1.)], dtype=arr.dtype)
).all()
def test_distinct_on_str():
rec = pd.DataFrame(
[['a', 'a'],
['a', 'b'],
['b', 'a'],
['b', 'b']],
columns=('a', 'b'),
).to_records(index=False).astype([('a', '<U1'), ('b', '<U1')])
s = symbol('s', discover(rec))
assert (
compute(s.distinct('a'), rec) ==
pd.DataFrame(
[['a', 'a'],
['b', 'a']],
columns=('a', 'b'),
).to_records(index=False).astype([('a', '<U1'), ('b', '<U1')])
).all()
def test_sort():
assert eq(compute(t.sort('amount'), x),
np.sort(x, order='amount'))
assert eq(compute(t.sort('amount', ascending=False), x),
np.sort(x, order='amount')[::-1])
assert eq(compute(t.sort(['amount', 'id']), x),
np.sort(x, order=['amount', 'id']))
assert eq(compute(t.amount.sort(), x),
np.sort(x['amount']))
def test_head():
assert eq(compute(t.head(2), x),
x[:2])
def test_tail():
assert eq(compute(t.tail(2), x),
x[-2:])
def test_label():
expected = x['amount'] * 10
expected = np.array(expected, dtype=[('foo', 'i8')])
assert eq(compute((t['amount'] * 10).label('foo'), x),
expected)
def test_relabel():
expected = np.array(x, dtype=[('ID', 'i8'), ('NAME', 'S7'), ('amount', 'i8')])
result = compute(t.relabel({'name': 'NAME', 'id': 'ID'}), x)
assert result.dtype.names == expected.dtype.names
assert eq(result, expected)
def test_by():
expr = by(t.amount > 0, count=t.id.count())
result = compute(expr, x)
assert set(map(tuple, into(list, result))) == set([(False, 2), (True, 3)])
def test_compute_up_field():
assert eq(compute(t['name'], x), x['name'])
def test_compute_up_projection():
assert eq(compute_up(t[['name', 'amount']], x), x[['name', 'amount']])
ax = np.arange(30, dtype='f4').reshape((5, 3, 2))
a = symbol('a', discover(ax))
def test_slice():
inds = [0, slice(2), slice(1, 3), slice(None, None, 2), [1, 2, 3],
(0, 1), (0, slice(1, 3)), (slice(0, 3), slice(3, 1, -1)),
(0, [1, 2])]
for s in inds:
assert (compute(a[s], ax) == ax[s]).all()
def test_array_reductions():
for axis in [None, 0, 1, (0, 1), (2, 1)]:
assert eq(compute(a.sum(axis=axis), ax), ax.sum(axis=axis))
assert eq(compute(a.std(axis=axis), ax), ax.std(axis=axis))
def test_array_reductions_with_keepdims():
for axis in [None, 0, 1, (0, 1), (2, 1)]:
assert eq(compute(a.sum(axis=axis, keepdims=True), ax),
ax.sum(axis=axis, keepdims=True))
def test_summary_on_ndarray():
assert compute(summary(total=a.sum(), min=a.min()), ax) == \
(ax.min(), ax.sum())
result = compute(summary(total=a.sum(), min=a.min(), keepdims=True), ax)
expected = np.array([(ax.min(), ax.sum())],
dtype=[('min', 'float32'), ('total', 'float64')])
assert result.ndim == ax.ndim
assert eq(expected, result)
def test_summary_on_ndarray_with_axis():
for axis in [0, 1, (1, 0)]:
expr = summary(total=a.sum(), min=a.min(), axis=axis)
result = compute(expr, ax)
shape, dtype = to_numpy(expr.dshape)
expected = np.empty(shape=shape, dtype=dtype)
expected['total'] = ax.sum(axis=axis)
expected['min'] = ax.min(axis=axis)
assert eq(result, expected)
def test_utcfromtimestamp():
t = symbol('t', '1 * int64')
data = np.array([0, 1])
expected = np.array(['1970-01-01T00:00:00Z', '1970-01-01T00:00:01Z'],
dtype='M8[us]')
assert eq(compute(t.utcfromtimestamp, data), expected)
def test_nelements_structured_array():
assert compute(t.nelements(), x) == len(x)
assert compute(t.nelements(keepdims=True), x) == (len(x),)
def test_nelements_array():
t = symbol('t', '5 * 4 * 3 * float64')
x = np.random.randn(*t.shape)
result = compute(t.nelements(axis=(0, 1)), x)
np.testing.assert_array_equal(result, np.array([20, 20, 20]))
result = compute(t.nelements(axis=1), x)
np.testing.assert_array_equal(result, 4 * np.ones((5, 3)))
def test_nrows():
assert compute(t.nrows, x) == len(x)
dts = np.array(['2000-06-25T12:30:04Z', '2000-06-28T12:50:05Z'],
dtype='M8[us]')
s = symbol('s', 'var * datetime')
def test_datetime_truncation():
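    # truncate(n, unit) floors each timestamp to an n-unit boundary, e.g. a 2-second
    # truncation maps 12:50:05 down to 12:50:04 and a 2-week truncation snaps both
    # dates to the same week start.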
assert eq(compute(s.truncate(1, 'day'), dts),
dts.astype('M8[D]'))
assert eq(compute(s.truncate(2, 'seconds'), dts),
np.array(['2000-06-25T12:30:04Z', '2000-06-28T12:50:04Z'],
dtype='M8[s]'))
assert eq(compute(s.truncate(2, 'weeks'), dts),
np.array(['2000-06-18', '2000-06-18'], dtype='M8[D]'))
assert into(list, compute(s.truncate(1, 'week'), dts))[0].isoweekday() == 7
def test_hour():
dts = [datetime(2000, 6, 20, 1, 00, 00),
datetime(2000, 6, 20, 12, 59, 59),
datetime(2000, 6, 20, 12, 00, 00),
datetime(2000, 6, 20, 11, 59, 59)]
dts = into(np.ndarray, dts)
assert eq(compute(s.truncate(1, 'hour'), dts),
into(np.ndarray, [datetime(2000, 6, 20, 1, 0),
datetime(2000, 6, 20, 12, 0),
datetime(2000, 6, 20, 12, 0),
datetime(2000, 6, 20, 11, 0)]))
def test_month():
dts = [datetime(2000, 7, 1),
datetime(2000, 6, 30),
datetime(2000, 6, 1),
datetime(2000, 5, 31)]
dts = into(np.ndarray, dts)
assert eq(compute(s.truncate(1, 'month'), dts),
into(np.ndarray, [date(2000, 7, 1),
date(2000, 6, 1),
date(2000, 6, 1),
date(2000, 5, 1)]))
def test_truncate_on_np_datetime64_scalar():
s = symbol('s', 'datetime')
data = np.datetime64('2000-01-02T12:30:00Z')
assert compute(s.truncate(1, 'day'), data) == data.astype('M8[D]')
def test_numpy_and_python_datetime_truncate_agree_on_start_of_week():
s = symbol('s', 'datetime')
n = np.datetime64('2014-11-11')
p = datetime(2014, 11, 11)
expr = s.truncate(1, 'week')
assert compute(expr, n) == compute(expr, p)
def test_add_multiple_ndarrays():
a = symbol('a', '5 * 4 * int64')
b = symbol('b', '5 * 4 * float32')
x = np.arange(9, dtype='int64').reshape(3, 3)
y = (x + 1).astype('float32')
expr = sin(a) + 2 * b
scope = {a: x, b: y}
expected = sin(x) + 2 * y
# check that we cast correctly
assert expr.dshape == dshape('5 * 4 * float64')
np.testing.assert_array_equal(compute(expr, scope), expected)
np.testing.assert_array_equal(compute(expr, scope, optimize=False),
expected)
nA = np.arange(30, dtype='f4').reshape((5, 6))
ny = np.arange(6, dtype='f4')
A = symbol('A', discover(nA))
y = symbol('y', discover(ny))
def test_transpose():
assert eq(compute(A.T, nA), nA.T)
assert eq(compute(A.transpose((0, 1)), nA), nA)
def test_dot():
assert eq(compute(y.dot(y), {y: ny}), np.dot(ny, ny))
assert eq(compute(A.dot(y), {A: nA, y: ny}), np.dot(nA, ny))
def test_subexpr_datetime():
data = pd.date_range(start='01/01/2010', end='01/04/2010', freq='D').values
s = symbol('s', discover(data))
result = compute(s.truncate(days=2).day, data)
expected = np.array([31, 2, 2, 4])
np.testing.assert_array_equal(result, expected)
def test_mixed_types():
x = np.array([[(4, 180), (4, 184), (4, 188), (4, 192), (4, 196)],
[(4, 660), (4, 664), (4, 668), (4, 672), (4, 676)],
[(4, 1140), (4, 1144), (4, 1148), (4, 1152), (4, 1156)],
[(4, 1620), (4, 1624), (4, 1628), (4, 1632), (4, 1636)],
[(4, 2100), (4, 2104), (4, 2108), (4, 2112), (4, 2116)]],
dtype=[('count', '<i4'), ('total', '<i8')])
aggregate = symbol('aggregate', discover(x))
result = compute(aggregate.total.sum(axis=(0,)) /
aggregate['count'].sum(axis=(0,)), x)
expected = (x['total'].sum(axis=0, keepdims=True) /
x['count'].sum(axis=0, keepdims=True)).squeeze()
np.testing.assert_array_equal(result, expected)
def test_broadcast_compute_against_numbers_and_arrays():
A = symbol('A', '5 * float32')
a = symbol('a', 'float32')
b = symbol('b', 'float32')
x = np.arange(5, dtype='f4')
expr = Broadcast((A, b), (a, b), a + b)
result = compute(expr, {A: x, b: 10})
assert eq(result, x + 10)
def test_map():
pytest.importorskip('numba')
a = np.arange(10.0)
f = lambda x: np.sin(x) + 1.03 * np.cos(x) ** 2
x = symbol('x', discover(a))
expr = x.map(f, 'float64')
result = compute(expr, a)
expected = f(a)
# make sure we're not going to pandas here
assert type(result) == np.ndarray
assert type(result) == type(expected)
np.testing.assert_array_equal(result, expected)
def test_vector_norm():
x = np.arange(30).reshape((5, 6))
s = symbol('x', discover(x))
assert eq(compute(s.vnorm(), x),
np.linalg.norm(x))
assert eq(compute(s.vnorm(ord=1), x),
np.linalg.norm(x.flatten(), ord=1))
assert eq(compute(s.vnorm(ord=4, axis=0), x),
np.linalg.norm(x, ord=4, axis=0))
expr = s.vnorm(ord=4, axis=0, keepdims=True)
assert expr.shape == compute(expr, x).shape
def test_join():
cities = np.array([('Alice', 'NYC'),
('Alice', 'LA'),
('Bob', 'Chicago')],
dtype=[('name', 'S7'), ('city', 'O')])
c = symbol('cities', discover(cities))
expr = join(t, c, 'name')
result = compute(expr, {t: x, c: cities})
assert (b'Alice', 1, 100, 'LA') in into(list, result)
def test_query_with_strings():
b = np.array([('a', 1), ('b', 2), ('c', 3)],
dtype=[('x', 'S1'), ('y', 'i4')])
s = symbol('s', discover(b))
assert compute(s[s.x == b'b'], b).tolist() == [(b'b', 2)]
@pytest.mark.parametrize('keys', [['a'], list('bc')])
def test_isin(keys):
b = np.array([('a', 1), ('b', 2), ('c', 3), ('a', 4), ('c', 5), ('b', 6)],
dtype=[('x', 'S1'), ('y', 'i4')])
s = symbol('s', discover(b))
result = compute(s.x.isin(keys), b)
expected = np.in1d(b['x'], keys)
np.testing.assert_array_equal(result, expected)
def test_nunique_recarray():
b = np.array([('a', 1), ('b', 2), ('c', 3), ('a', 4), ('c', 5), ('b', 6),
('a', 1), ('b', 2)],
dtype=[('x', 'S1'), ('y', 'i4')])
s = symbol('s', discover(b))
expr = s.nunique()
assert compute(expr, b) == len(np.unique(b))
def test_str_repeat():
a = np.array(('a', 'b', 'c'))
s = symbol('s', discover(a))
expr = s.repeat(3)
assert all(compute(expr, a) == np.char.multiply(a, 3))
def test_str_interp():
a = np.array(('%s', '%s', '%s'))
s = symbol('s', discover(a))
expr = s.interp(1)
assert all(compute(expr, a) == np.char.mod(a, 1))
def test_timedelta_arith():
dates = np.arange('2014-01-01', '2014-02-01', dtype='datetime64')
delta = np.timedelta64(1, 'D')
sym = symbol('s', discover(dates))
assert (compute(sym + delta, dates) == dates + delta).all()
assert (compute(sym - delta, dates) == dates - delta).all()
assert (
compute(sym - (sym - delta), dates) ==
dates - (dates - delta)
).all()
def test_coerce():
x = np.arange(1, 3)
s = symbol('s', discover(x))
np.testing.assert_array_equal(compute(s.coerce('float64'), x),
np.arange(1.0, 3.0))
def test_concat_arr():
s_data = np.arange(15)
t_data = np.arange(15, 30)
s = symbol('s', discover(s_data))
t = symbol('t', discover(t_data))
assert (
compute(concat(s, t), {s: s_data, t: t_data}) ==
np.arange(30)
).all()
def test_concat_mat():
s_data = np.arange(15).reshape(5, 3)
t_data = np.arange(15, 30).reshape(5, 3)
s = symbol('s', discover(s_data))
t = symbol('t', discover(t_data))
assert (
compute(concat(s, t), {s: s_data, t: t_data}) ==
np.arange(30).reshape(10, 3)
).all()
assert (
compute(concat(s, t, axis=1), {s: s_data, t: t_data}) ==
np.concatenate((s_data, t_data), axis=1)
).all()
@pytest.mark.parametrize('dtype', ['int64', 'float64'])
def test_least(dtype):
s_data = np.arange(15, dtype=dtype).reshape(5, 3)
t_data = np.arange(15, 30, dtype=dtype).reshape(5, 3)
s = symbol('s', discover(s_data))
t = symbol('t', discover(t_data))
expr = least(s, t)
result = compute(expr, {s: s_data, t: t_data})
expected = np.minimum(s_data, t_data)
assert np.all(result == expected)
@pytest.mark.parametrize('dtype', ['int64', 'float64'])
def test_least_mixed(dtype):
s_data = np.array([2, 1], dtype=dtype)
t_data = np.array([1, 2], dtype=dtype)
s = symbol('s', discover(s_data))
t = symbol('t', discover(t_data))
expr = least(s, t)
result = compute(expr, {s: s_data, t: t_data})
expected = np.minimum(s_data, t_data)
assert np.all(result == expected)
@pytest.mark.parametrize('dtype', ['int64', 'float64'])
def test_greatest(dtype):
s_data = np.arange(15, dtype=dtype).reshape(5, 3)
t_data = np.arange(15, 30, dtype=dtype).reshape(5, 3)
s = symbol('s', discover(s_data))
t = symbol('t', discover(t_data))
expr = greatest(s, t)
result = compute(expr, {s: s_data, t: t_data})
expected = np.maximum(s_data, t_data)
assert np.all(result == expected)
@pytest.mark.parametrize('dtype', ['int64', 'float64'])
def test_greatest_mixed(dtype):
s_data = np.array([2, 1], dtype=dtype)
t_data = np.array([1, 2], dtype=dtype)
s = symbol('s', discover(s_data))
t = symbol('t', discover(t_data))
expr = greatest(s, t)
result = compute(expr, {s: s_data, t: t_data})
expected = np.maximum(s_data, t_data)
assert np.all(result == expected)
binary_name_map = {
'atan2': 'arctan2'
}
@pytest.mark.parametrize(
['func', 'kwargs'],
itertools.product(['copysign', 'ldexp'], [dict(optimize=False), dict()])
)
def test_binary_math(func, kwargs):
s_data = np.arange(15).reshape(5, 3)
t_data = np.arange(15, 30).reshape(5, 3)
s = symbol('s', discover(s_data))
t = symbol('t', discover(t_data))
scope = {s: s_data, t: t_data}
result = compute(getattr(blaze, func)(s, t), scope, **kwargs)
expected = getattr(np, binary_name_map.get(func, func))(s_data, t_data)
np.testing.assert_equal(result, expected)
@pytest.mark.parametrize(
['func', 'kwargs'],
itertools.product(['atan2', 'hypot'], [dict(optimize=False), dict()])
)
def test_floating_binary_math(func, kwargs):
s_data = np.arange(15).reshape(5, 3)
t_data = np.arange(15, 30).reshape(5, 3)
s = symbol('s', discover(s_data))
t = symbol('t', discover(t_data))
scope = {s: s_data, t: t_data}
result = compute(getattr(blaze, func)(s, t), scope, **kwargs)
expected = getattr(np, binary_name_map.get(func, func))(s_data, t_data)
np.testing.assert_allclose(result, expected)
def test_selection_inner_inputs():
s_data = np.arange(5).reshape(5, 1)
t_data = np.arange(5).reshape(5, 1)
s = symbol('s', 'var * {a: int64}')
t = symbol('t', 'var * {a: int64}')
assert (
compute(s[s.a == t.a], {s: s_data, t: t_data}) ==
s_data
).all()
def test_coalesce():
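    # coalesce(a, b) mirrors SQL COALESCE: take each value from ``s`` and fall back to
    # the second argument wherever ``s`` is null.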
data = np.array([0, None, 1, None, 2, None])
s = symbol('s', 'var * ?int')
t = symbol('t', 'int')
u = symbol('u', '?int')
v = symbol('v', 'var * int')
w = symbol('w', 'var * ?int')
# array to scalar
np.testing.assert_array_equal(
compute(coalesce(s, t), {s: data, t: -1}),
np.array([0, -1, 1, -1, 2, -1]),
)
# array to scalar with NULL
np.testing.assert_array_equal(
compute(coalesce(s, u), {s: data, u: None}),
np.array([0, None, 1, None, 2, None], dtype=object)
)
# array to array
np.testing.assert_array_equal(
compute(coalesce(s, v), {
s: data, v: np.array([-1, -2, -3, -4, -5, -6]),
}),
np.array([0, -2, 1, -4, 2, -6])
)
# array to array with NULL
np.testing.assert_array_equal(
compute(coalesce(s, w), {
s: data, w: np.array([-1, None, -3, -4, -5, -6]),
}),
np.array([0, None, 1, -4, 2, -6]),
)
| {
"repo_name": "ContinuumIO/blaze",
"path": "blaze/compute/tests/test_numpy_compute.py",
"copies": "3",
"size": "20852",
"license": "bsd-3-clause",
"hash": 1928576290793857000,
"line_mean": 28.7885714286,
"line_max": 82,
"alpha_frac": 0.5281987339,
"autogenerated": false,
"ratio": 2.958989640981978,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9985389015332056,
"avg_score": 0.00035987190998452556,
"num_lines": 700
} |
from __future__ import absolute_import, division, print_function
import pytest
import numpy as np
import pandas as pd
from datetime import datetime, date
from blaze.compute.core import compute, compute_up
from blaze.expr import symbol, by, exp, summary, Broadcast, join, concat
from blaze import sin
from odo import into
from datashape import discover, to_numpy, dshape
x = np.array([(1, 'Alice', 100),
(2, 'Bob', -200),
(3, 'Charlie', 300),
(4, 'Denis', 400),
(5, 'Edith', -500)],
dtype=[('id', 'i8'), ('name', 'S7'), ('amount', 'i8')])
t = symbol('t', discover(x))
def eq(a, b):
c = a == b
if isinstance(c, np.ndarray):
return c.all()
return c
def test_symbol():
assert eq(compute(t, x), x)
def test_eq():
assert eq(compute(t['amount'] == 100, x),
x['amount'] == 100)
def test_selection():
    assert eq(compute(t[t['amount'] == 100], x), x[x['amount'] == 100])
assert eq(compute(t[t['amount'] < 0], x), x[x['amount'] < 0])
def test_arithmetic():
assert eq(compute(t['amount'] + t['id'], x),
x['amount'] + x['id'])
assert eq(compute(t['amount'] * t['id'], x),
x['amount'] * x['id'])
assert eq(compute(t['amount'] % t['id'], x),
x['amount'] % x['id'])
def test_UnaryOp():
assert eq(compute(exp(t['amount']), x),
np.exp(x['amount']))
assert eq(compute(abs(-t['amount']), x),
abs(-x['amount']))
def test_Neg():
assert eq(compute(-t['amount'], x),
-x['amount'])
def test_invert_not():
assert eq(compute(~(t.amount > 0), x),
~(x['amount'] > 0))
def test_Reductions():
assert compute(t['amount'].mean(), x) == x['amount'].mean()
assert compute(t['amount'].count(), x) == len(x['amount'])
assert compute(t['amount'].sum(), x) == x['amount'].sum()
assert compute(t['amount'].min(), x) == x['amount'].min()
assert compute(t['amount'].max(), x) == x['amount'].max()
assert compute(t['amount'].nunique(), x) == len(np.unique(x['amount']))
assert compute(t['amount'].var(), x) == x['amount'].var()
assert compute(t['amount'].std(), x) == x['amount'].std()
assert compute(t['amount'].var(unbiased=True), x) == x['amount'].var(ddof=1)
assert compute(t['amount'].std(unbiased=True), x) == x['amount'].std(ddof=1)
assert compute((t['amount'] > 150).any(), x) == True
assert compute((t['amount'] > 250).all(), x) == False
assert compute(t['amount'][0], x) == x['amount'][0]
assert compute(t['amount'][-1], x) == x['amount'][-1]
def test_count_string():
s = symbol('name', 'var * ?string')
x = np.array(['Alice', np.nan, 'Bob', 'Denis', 'Edith'], dtype='object')
assert compute(s.count(), x) == 4
def test_reductions_on_recarray():
assert compute(t.count(), x) == len(x)
def test_count_nan():
t = symbol('t', '3 * ?real')
x = np.array([1.0, np.nan, 2.0])
assert compute(t.count(), x) == 2
def test_distinct():
x = np.array([('Alice', 100),
('Alice', -200),
('Bob', 100),
('Bob', 100)],
dtype=[('name', 'S5'), ('amount', 'i8')])
t = symbol('t', 'var * {name: string, amount: int64}')
assert eq(compute(t['name'].distinct(), x),
np.unique(x['name']))
assert eq(compute(t.distinct(), x),
np.unique(x))
def test_distinct_on_recarray():
rec = pd.DataFrame(
[[0, 1],
[0, 2],
[1, 1],
[1, 2]],
columns=('a', 'b'),
).to_records(index=False)
s = symbol('s', discover(rec))
assert (
compute(s.distinct('a'), rec) ==
pd.DataFrame(
[[0, 1],
[1, 1]],
columns=('a', 'b'),
).to_records(index=False)
).all()
def test_distinct_on_structured_array():
arr = np.array(
[(0., 1.),
(0., 2.),
(1., 1.),
(1., 2.)],
dtype=[('a', 'f4'), ('b', 'f4')],
)
s = symbol('s', discover(arr))
assert(
compute(s.distinct('a'), arr) ==
np.array([(0., 1.), (1., 1.)], dtype=arr.dtype)
).all()
def test_distinct_on_str():
rec = pd.DataFrame(
[['a', 'a'],
['a', 'b'],
['b', 'a'],
['b', 'b']],
columns=('a', 'b'),
).to_records(index=False).astype([('a', '<U1'), ('b', '<U1')])
s = symbol('s', discover(rec))
assert (
compute(s.distinct('a'), rec) ==
pd.DataFrame(
[['a', 'a'],
['b', 'a']],
columns=('a', 'b'),
).to_records(index=False).astype([('a', '<U1'), ('b', '<U1')])
).all()
def test_sort():
assert eq(compute(t.sort('amount'), x),
np.sort(x, order='amount'))
assert eq(compute(t.sort('amount', ascending=False), x),
np.sort(x, order='amount')[::-1])
assert eq(compute(t.sort(['amount', 'id']), x),
np.sort(x, order=['amount', 'id']))
assert eq(compute(t.amount.sort(), x),
np.sort(x['amount']))
def test_head():
assert eq(compute(t.head(2), x),
x[:2])
def test_tail():
assert eq(compute(t.tail(2), x),
x[-2:])
def test_label():
expected = x['amount'] * 10
expected = np.array(expected, dtype=[('foo', 'i8')])
assert eq(compute((t['amount'] * 10).label('foo'), x),
expected)
def test_relabel():
expected = np.array(x, dtype=[('ID', 'i8'), ('NAME', 'S7'), ('amount', 'i8')])
result = compute(t.relabel({'name': 'NAME', 'id': 'ID'}), x)
assert result.dtype.names == expected.dtype.names
assert eq(result, expected)
def test_by():
expr = by(t.amount > 0, count=t.id.count())
result = compute(expr, x)
assert set(map(tuple, into(list, result))) == set([(False, 2), (True, 3)])
def test_compute_up_field():
assert eq(compute(t['name'], x), x['name'])
def test_compute_up_projection():
assert eq(compute_up(t[['name', 'amount']], x), x[['name', 'amount']])
ax = np.arange(30, dtype='f4').reshape((5, 3, 2))
a = symbol('a', discover(ax))
def test_slice():
inds = [0, slice(2), slice(1, 3), slice(None, None, 2), [1, 2, 3],
(0, 1), (0, slice(1, 3)), (slice(0, 3), slice(3, 1, -1)),
(0, [1, 2])]
for s in inds:
assert (compute(a[s], ax) == ax[s]).all()
def test_array_reductions():
for axis in [None, 0, 1, (0, 1), (2, 1)]:
assert eq(compute(a.sum(axis=axis), ax), ax.sum(axis=axis))
assert eq(compute(a.std(axis=axis), ax), ax.std(axis=axis))
def test_array_reductions_with_keepdims():
for axis in [None, 0, 1, (0, 1), (2, 1)]:
assert eq(compute(a.sum(axis=axis, keepdims=True), ax),
ax.sum(axis=axis, keepdims=True))
def test_summary_on_ndarray():
assert compute(summary(total=a.sum(), min=a.min()), ax) == \
(ax.min(), ax.sum())
result = compute(summary(total=a.sum(), min=a.min(), keepdims=True), ax)
expected = np.array([(ax.min(), ax.sum())],
dtype=[('min', 'float32'), ('total', 'float64')])
assert result.ndim == ax.ndim
assert eq(expected, result)
def test_summary_on_ndarray_with_axis():
for axis in [0, 1, (1, 0)]:
expr = summary(total=a.sum(), min=a.min(), axis=axis)
result = compute(expr, ax)
shape, dtype = to_numpy(expr.dshape)
expected = np.empty(shape=shape, dtype=dtype)
expected['total'] = ax.sum(axis=axis)
expected['min'] = ax.min(axis=axis)
assert eq(result, expected)
def test_utcfromtimestamp():
t = symbol('t', '1 * int64')
data = np.array([0, 1])
expected = np.array(['1970-01-01T00:00:00Z', '1970-01-01T00:00:01Z'],
dtype='M8[us]')
assert eq(compute(t.utcfromtimestamp, data), expected)
def test_nelements_structured_array():
assert compute(t.nelements(), x) == len(x)
assert compute(t.nelements(keepdims=True), x) == (len(x),)
def test_nelements_array():
t = symbol('t', '5 * 4 * 3 * float64')
x = np.random.randn(*t.shape)
result = compute(t.nelements(axis=(0, 1)), x)
np.testing.assert_array_equal(result, np.array([20, 20, 20]))
result = compute(t.nelements(axis=1), x)
np.testing.assert_array_equal(result, 4 * np.ones((5, 3)))
def test_nrows():
assert compute(t.nrows, x) == len(x)
dts = np.array(['2000-06-25T12:30:04Z', '2000-06-28T12:50:05Z'],
dtype='M8[us]')
s = symbol('s', 'var * datetime')
def test_datetime_truncation():
assert eq(compute(s.truncate(1, 'day'), dts),
dts.astype('M8[D]'))
assert eq(compute(s.truncate(2, 'seconds'), dts),
np.array(['2000-06-25T12:30:04Z', '2000-06-28T12:50:04Z'],
dtype='M8[s]'))
assert eq(compute(s.truncate(2, 'weeks'), dts),
np.array(['2000-06-18', '2000-06-18'], dtype='M8[D]'))
assert into(list, compute(s.truncate(1, 'week'), dts))[0].isoweekday() == 7
def test_hour():
dts = [datetime(2000, 6, 20, 1, 00, 00),
datetime(2000, 6, 20, 12, 59, 59),
datetime(2000, 6, 20, 12, 00, 00),
datetime(2000, 6, 20, 11, 59, 59)]
dts = into(np.ndarray, dts)
assert eq(compute(s.truncate(1, 'hour'), dts),
into(np.ndarray, [datetime(2000, 6, 20, 1, 0),
datetime(2000, 6, 20, 12, 0),
datetime(2000, 6, 20, 12, 0),
datetime(2000, 6, 20, 11, 0)]))
def test_month():
dts = [datetime(2000, 7, 1),
datetime(2000, 6, 30),
datetime(2000, 6, 1),
datetime(2000, 5, 31)]
dts = into(np.ndarray, dts)
assert eq(compute(s.truncate(1, 'month'), dts),
into(np.ndarray, [date(2000, 7, 1),
date(2000, 6, 1),
date(2000, 6, 1),
date(2000, 5, 1)]))
def test_truncate_on_np_datetime64_scalar():
s = symbol('s', 'datetime')
data = np.datetime64('2000-01-02T12:30:00Z')
assert compute(s.truncate(1, 'day'), data) == data.astype('M8[D]')
def test_numpy_and_python_datetime_truncate_agree_on_start_of_week():
s = symbol('s', 'datetime')
n = np.datetime64('2014-11-11')
p = datetime(2014, 11, 11)
expr = s.truncate(1, 'week')
assert compute(expr, n) == compute(expr, p)
def test_add_multiple_ndarrays():
a = symbol('a', '5 * 4 * int64')
b = symbol('b', '5 * 4 * float32')
x = np.arange(9, dtype='int64').reshape(3, 3)
y = (x + 1).astype('float32')
expr = sin(a) + 2 * b
scope = {a: x, b: y}
expected = sin(x) + 2 * y
# check that we cast correctly
assert expr.dshape == dshape('5 * 4 * float64')
np.testing.assert_array_equal(compute(expr, scope), expected)
np.testing.assert_array_equal(compute(expr, scope, optimize=False),
expected)
nA = np.arange(30, dtype='f4').reshape((5, 6))
ny = np.arange(6, dtype='f4')
A = symbol('A', discover(nA))
y = symbol('y', discover(ny))
def test_transpose():
assert eq(compute(A.T, nA), nA.T)
assert eq(compute(A.transpose((0, 1)), nA), nA)
def test_dot():
assert eq(compute(y.dot(y), {y: ny}), np.dot(ny, ny))
assert eq(compute(A.dot(y), {A: nA, y: ny}), np.dot(nA, ny))
def test_subexpr_datetime():
data = pd.date_range(start='01/01/2010', end='01/04/2010', freq='D').values
s = symbol('s', discover(data))
result = compute(s.truncate(days=2).day, data)
expected = np.array([31, 2, 2, 4])
np.testing.assert_array_equal(result, expected)
def test_mixed_types():
x = np.array([[(4, 180), (4, 184), (4, 188), (4, 192), (4, 196)],
[(4, 660), (4, 664), (4, 668), (4, 672), (4, 676)],
[(4, 1140), (4, 1144), (4, 1148), (4, 1152), (4, 1156)],
[(4, 1620), (4, 1624), (4, 1628), (4, 1632), (4, 1636)],
[(4, 2100), (4, 2104), (4, 2108), (4, 2112), (4, 2116)]],
dtype=[('count', '<i4'), ('total', '<i8')])
aggregate = symbol('aggregate', discover(x))
result = compute(aggregate.total.sum(axis=(0,)) /
aggregate.count.sum(axis=(0,)), x)
expected = (x['total'].sum(axis=0, keepdims=True) /
x['count'].sum(axis=0, keepdims=True)).squeeze()
np.testing.assert_array_equal(result, expected)
def test_broadcast_compute_against_numbers_and_arrays():
A = symbol('A', '5 * float32')
a = symbol('a', 'float32')
b = symbol('b', 'float32')
x = np.arange(5, dtype='f4')
expr = Broadcast((A, b), (a, b), a + b)
result = compute(expr, {A: x, b: 10})
assert eq(result, x + 10)
def test_map():
pytest.importorskip('numba')
a = np.arange(10.0)
f = lambda x: np.sin(x) + 1.03 * np.cos(x) ** 2
x = symbol('x', discover(a))
expr = x.map(f, 'float64')
result = compute(expr, a)
expected = f(a)
# make sure we're not going to pandas here
assert type(result) == np.ndarray
assert type(result) == type(expected)
np.testing.assert_array_equal(result, expected)
def test_vector_norm():
x = np.arange(30).reshape((5, 6))
s = symbol('x', discover(x))
assert eq(compute(s.vnorm(), x),
np.linalg.norm(x))
assert eq(compute(s.vnorm(ord=1), x),
np.linalg.norm(x.flatten(), ord=1))
assert eq(compute(s.vnorm(ord=4, axis=0), x),
np.linalg.norm(x, ord=4, axis=0))
expr = s.vnorm(ord=4, axis=0, keepdims=True)
assert expr.shape == compute(expr, x).shape
def test_join():
cities = np.array([('Alice', 'NYC'),
('Alice', 'LA'),
('Bob', 'Chicago')],
dtype=[('name', 'S7'), ('city', 'O')])
c = symbol('cities', discover(cities))
expr = join(t, c, 'name')
result = compute(expr, {t: x, c: cities})
assert (b'Alice', 1, 100, 'LA') in into(list, result)
def test_query_with_strings():
b = np.array([('a', 1), ('b', 2), ('c', 3)],
dtype=[('x', 'S1'), ('y', 'i4')])
s = symbol('s', discover(b))
assert compute(s[s.x == b'b'], b).tolist() == [(b'b', 2)]
@pytest.mark.parametrize('keys', [['a'], list('bc')])
def test_isin(keys):
b = np.array([('a', 1), ('b', 2), ('c', 3), ('a', 4), ('c', 5), ('b', 6)],
dtype=[('x', 'S1'), ('y', 'i4')])
s = symbol('s', discover(b))
result = compute(s.x.isin(keys), b)
expected = np.in1d(b['x'], keys)
np.testing.assert_array_equal(result, expected)
def test_nunique_recarray():
b = np.array([('a', 1), ('b', 2), ('c', 3), ('a', 4), ('c', 5), ('b', 6),
('a', 1), ('b', 2)],
dtype=[('x', 'S1'), ('y', 'i4')])
s = symbol('s', discover(b))
expr = s.nunique()
assert compute(expr, b) == len(np.unique(b))
def test_str_repeat():
a = np.array(('a', 'b', 'c'))
s = symbol('s', discover(a))
expr = s.repeat(3)
assert all(compute(expr, a) == np.char.multiply(a, 3))
def test_str_interp():
a = np.array(('%s', '%s', '%s'))
s = symbol('s', discover(a))
expr = s.interp(1)
assert all(compute(expr, a) == np.char.mod(a, 1))
def test_timedelta_arith():
dates = np.arange('2014-01-01', '2014-02-01', dtype='datetime64')
delta = np.timedelta64(1, 'D')
sym = symbol('s', discover(dates))
assert (compute(sym + delta, dates) == dates + delta).all()
assert (compute(sym - delta, dates) == dates - delta).all()
def test_coerce():
x = np.arange(1, 3)
s = symbol('s', discover(x))
np.testing.assert_array_equal(compute(s.coerce('float64'), x),
np.arange(1.0, 3.0))
def test_concat_arr():
s_data = np.arange(15)
t_data = np.arange(15, 30)
s = symbol('s', discover(s_data))
t = symbol('t', discover(t_data))
assert (
compute(concat(s, t), {s: s_data, t: t_data}) ==
np.arange(30)
).all()
def test_concat_mat():
s_data = np.arange(15).reshape(5, 3)
t_data = np.arange(15, 30).reshape(5, 3)
s = symbol('s', discover(s_data))
t = symbol('t', discover(t_data))
assert (
compute(concat(s, t), {s: s_data, t: t_data}) ==
np.arange(30).reshape(10, 3)
).all()
assert (
compute(concat(s, t, axis=1), {s: s_data, t: t_data}) ==
np.concatenate((s_data, t_data), axis=1)
).all()
| {
"repo_name": "maxalbert/blaze",
"path": "blaze/compute/tests/test_numpy_compute.py",
"copies": "3",
"size": "16537",
"license": "bsd-3-clause",
"hash": 2990124276218332700,
"line_mean": 28.5303571429,
"line_max": 82,
"alpha_frac": 0.5169619641,
"autogenerated": false,
"ratio": 2.997462388979518,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0004498398874806569,
"num_lines": 560
} |
from __future__ import absolute_import, division, print_function
import pytest
import numpy as np
import tensorflow as tf
import logging
from lucid.optvis.param.cppn import cppn
log = logging.getLogger(__name__)
@pytest.mark.slow
def test_cppn_fits_xor():
with tf.Graph().as_default(), tf.Session() as sess:
cppn_param = cppn(16, num_output_channels=1)[0]
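    # The objective below rewards bright values in two opposite corners and dark
    # values in the other two -- an XOR-like target the small CPPN should be able to
    # fit within a few hundred Adam steps (three restarts are allowed before failing).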
def xor_objective(a):
return -(
tf.square(a[0, 0])
+ tf.square(a[-1, -1])
+ tf.square(1.0 - a[-1, 0])
+ tf.square(1.0 - a[0, -1])
)
loss_t = xor_objective(cppn_param)
optimizer = tf.train.AdamOptimizer(0.01)
objective = optimizer.minimize(loss_t)
for try_i in range(3):
tf.global_variables_initializer().run()
# loss = loss_t.eval()
for i in range(200):
_, vis = sess.run([objective, cppn_param])
close_enough = (
vis[0, 0] > .99
and vis[-1, -1] > .99
and vis[-1, 0] < .01
and vis[0, -1] < .01
)
if close_enough:
return
assert False, "fitting XOR took more than 200 steps, failing test"
| {
"repo_name": "tensorflow/lucid",
"path": "tests/optvis/param/test_cppn.py",
"copies": "1",
"size": "1302",
"license": "apache-2.0",
"hash": 6490602012742315000,
"line_mean": 27.3043478261,
"line_max": 74,
"alpha_frac": 0.4976958525,
"autogenerated": false,
"ratio": 3.6166666666666667,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9614362519166667,
"avg_score": 0,
"num_lines": 46
} |
from __future__ import absolute_import, division, print_function
import pytest
import os
import numpy as np
HAVE_COLAB_NVIDIA = (os.path.exists('/usr/lib64-nvidia/') and
os.path.exists('/opt/bin/nvidia-smi'))
WIDTH, HEIGHT = 200, 100
if HAVE_COLAB_NVIDIA:
from lucid.misc.gl import glcontext # must be imported before OpenGL.GL
import OpenGL.GL as gl
from lucid.misc.gl import glrenderer
glcontext.create_opengl_context((WIDTH, HEIGHT))
@pytest.mark.skipif(not HAVE_COLAB_NVIDIA, reason="GPU Colab kernel only")
def test_gl_context():
# Render triangle
gl.glClear(gl.GL_COLOR_BUFFER_BIT)
gl.glBegin(gl.GL_TRIANGLES)
gl.glColor3f(1, 0, 0)
gl.glVertex2f(0, 1)
gl.glColor3f(0, 1, 0)
gl.glVertex2f(-1, -1)
gl.glColor3f(0, 0, 1)
gl.glVertex2f(1, -1)
gl.glEnd()
# Read result
img_buf = gl.glReadPixels(0, 0, WIDTH, HEIGHT, gl.GL_RGB, gl.GL_UNSIGNED_BYTE)
img = np.frombuffer(img_buf, np.uint8).reshape(HEIGHT, WIDTH, 3)[::-1]
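  # glReadPixels returns rows bottom-up (OpenGL's origin is the lower-left corner), so
  # the [::-1] above flips the buffer to top-down order before the corner checks.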
assert all(img[0, 0] == 0) # black corner
assert all(img[0,-1] == 0) # black corner
assert img[10, WIDTH//2].argmax() == 0 # red corner
assert img[-1, 10].argmax() == 1 # green corner
assert img[-1, -10].argmax() == 2 # blue corner
@pytest.mark.skipif(not HAVE_COLAB_NVIDIA, reason="GPU Colab kernel only")
def test_glrenderer():
w, h = 400, 200
renderer = glrenderer.MeshRenderer((w, h))
renderer.fovy = 90
position = [[0, 1, -1], [-2, -1,-1], [2, -1, -1]]
color = [[1, 0, 0], [0, 1, 0], [0, 0, 1]]
img = renderer.render_mesh(position, color)
img, alpha = img[..., :3], img[..., 3]
assert all(img[0, 0] == 0) # black corner
assert all(img[0,-1] == 0) # black corner
assert img[10, w//2].argmax() == 0 # red corner
assert img[-1, 10].argmax() == 1 # green corner
assert img[-1, -10].argmax() == 2 # blue corner
  assert np.abs(img.sum(-1)-alpha).max() < 1e-5
| {
"repo_name": "tensorflow/lucid",
"path": "tests/test_gl.py",
"copies": "1",
"size": "1919",
"license": "apache-2.0",
"hash": -7593234174846766000,
"line_mean": 29.4761904762,
"line_max": 84,
"alpha_frac": 0.6263678999,
"autogenerated": false,
"ratio": 2.6876750700280114,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.38140429699280115,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import pytest
import stripe
class TestUpdateableAPIResource(object):
class MyUpdateable(stripe.api_resources.abstract.UpdateableAPIResource):
OBJECT_NAME = "myupdateable"
@pytest.fixture
def obj(self, request_mock):
request_mock.stub_request(
"post",
"/v1/myupdateables/myid",
{"id": "myid", "thats": "it"},
rheaders={"request-id": "req_id"},
)
return self.MyUpdateable.construct_from(
{
"id": "myid",
"foo": "bar",
"baz": "boz",
"metadata": {"size": "l", "score": 4, "height": 10},
},
"mykey",
)
def checkSave(self, obj):
assert obj is obj.save()
assert obj.thats == "it"
# TODO: Should we force id to be retained?
# assert obj.id == 'myid'
with pytest.raises(AttributeError):
obj.baz
def test_idempotent_save(self, request_mock, obj):
obj.baz = "updated"
obj.save(idempotency_key="foo")
request_mock.assert_requested(
"post",
"/v1/myupdateables/myid",
{"baz": "updated"},
{"Idempotency-Key": "foo"},
)
def test_save(self, request_mock, obj):
obj.baz = "updated"
obj.other = "newval"
obj.metadata.size = "m"
obj.metadata.info = "a2"
obj.metadata.height = None
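        # Clearing a metadata key by assigning None is expected to be serialized as an
        # empty string in the request, which is how the Stripe API unsets a single key.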
self.checkSave(obj)
request_mock.assert_requested(
"post",
"/v1/myupdateables/myid",
{
"baz": "updated",
"other": "newval",
"metadata": {"size": "m", "info": "a2", "height": ""},
},
None,
)
assert obj.last_response is not None
assert obj.last_response.request_id == "req_id"
# Saving again should not cause any request.
request_mock.reset_mock()
self.checkSave(obj)
request_mock.assert_no_request()
# Setting the same value should cause a request.
request_mock.stub_request(
"post", "/v1/myupdateables/myid", {"id": "myid", "thats": "it"}
)
obj.thats = "it"
self.checkSave(obj)
request_mock.assert_requested(
"post", "/v1/myupdateables/myid", {"thats": "it"}, None
)
# Changing the value should cause a request.
request_mock.stub_request(
"post", "/v1/myupdateables/myid", {"id": "myid", "thats": "it"}
)
obj.id = "myid"
obj.thats = "updated"
self.checkSave(obj)
request_mock.assert_requested(
"post", "/v1/myupdateables/myid", {"thats": "updated"}, None
)
def test_add_key_to_nested_object(self, request_mock, obj):
acct = self.MyUpdateable.construct_from(
{
"id": "myid",
"legal_entity": {"size": "l", "score": 4, "height": 10},
},
"mykey",
)
acct.legal_entity["first_name"] = "bob"
assert acct is acct.save()
request_mock.assert_requested(
"post",
"/v1/myupdateables/myid",
{"legal_entity": {"first_name": "bob"}},
None,
)
def test_save_nothing(self, request_mock, obj):
acct = self.MyUpdateable.construct_from(
{"id": "myid", "metadata": {"key": "value"}}, "mykey"
)
assert acct is acct.save()
request_mock.assert_no_request()
def test_replace_nested_object(self, request_mock, obj):
acct = self.MyUpdateable.construct_from(
{"id": "myid", "legal_entity": {"last_name": "smith"}}, "mykey"
)
acct.legal_entity = {"first_name": "bob"}
assert acct is acct.save()
request_mock.assert_requested(
"post",
"/v1/myupdateables/myid",
{"legal_entity": {"first_name": "bob", "last_name": ""}},
None,
)
def test_array_setting(self, request_mock, obj):
acct = self.MyUpdateable.construct_from(
{"id": "myid", "legal_entity": {}}, "mykey"
)
acct.legal_entity.additional_owners = [{"first_name": "Bob"}]
assert acct is acct.save()
request_mock.assert_requested(
"post",
"/v1/myupdateables/myid",
{"legal_entity": {"additional_owners": [{"first_name": "Bob"}]}},
None,
)
def test_array_none(self, request_mock, obj):
acct = self.MyUpdateable.construct_from(
{"id": "myid", "legal_entity": {"additional_owners": None}},
"mykey",
)
acct.foo = "bar"
assert acct is acct.save()
request_mock.assert_requested(
"post", "/v1/myupdateables/myid", {"foo": "bar"}, None
)
def test_array_insertion(self, request_mock, obj):
acct = self.MyUpdateable.construct_from(
{"id": "myid", "legal_entity": {"additional_owners": []}}, "mykey"
)
acct.legal_entity.additional_owners.append({"first_name": "Bob"})
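        # List items appended client-side are expected to be encoded as an index-keyed
        # mapping ({"0": {...}}) in the outgoing request rather than as a plain array.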
assert acct is acct.save()
request_mock.assert_requested(
"post",
"/v1/myupdateables/myid",
{
"legal_entity": {
"additional_owners": {"0": {"first_name": "Bob"}}
}
},
None,
)
def test_array_update(self, request_mock, obj):
acct = self.MyUpdateable.construct_from(
{
"id": "myid",
"legal_entity": {
"additional_owners": [
{"first_name": "Bob"},
{"first_name": "Jane"},
]
},
},
"mykey",
)
acct.legal_entity.additional_owners[1].first_name = "Janet"
assert acct is acct.save()
request_mock.assert_requested(
"post",
"/v1/myupdateables/myid",
{
"legal_entity": {
"additional_owners": {
"0": {},
"1": {"first_name": "Janet"},
}
}
},
None,
)
def test_array_noop(self, request_mock, obj):
acct = self.MyUpdateable.construct_from(
{
"id": "myid",
"legal_entity": {"additional_owners": [{"first_name": "Bob"}]},
"currencies_supported": ["usd", "cad"],
},
"mykey",
)
assert acct is acct.save()
request_mock.assert_requested(
"post",
"/v1/myupdateables/myid",
{"legal_entity": {"additional_owners": {"0": {}}}},
None,
)
def test_hash_noop(self, request_mock, obj):
acct = self.MyUpdateable.construct_from(
{
"id": "myid",
"legal_entity": {"address": {"line1": "1 Two Three"}},
},
"mykey",
)
assert acct is acct.save()
request_mock.assert_no_request()
def test_save_replace_metadata_with_number(self, request_mock, obj):
obj.baz = "updated"
obj.other = "newval"
obj.metadata = 3
self.checkSave(obj)
request_mock.assert_requested(
"post",
"/v1/myupdateables/myid",
{"baz": "updated", "other": "newval", "metadata": 3},
None,
)
def test_save_overwrite_metadata(self, request_mock, obj):
obj.metadata = {}
self.checkSave(obj)
request_mock.assert_requested(
"post",
"/v1/myupdateables/myid",
{"metadata": {"size": "", "score": "", "height": ""}},
None,
)
def test_save_replace_metadata(self, request_mock, obj):
obj.baz = "updated"
obj.other = "newval"
obj.metadata = {"size": "m", "info": "a2", "score": 4}
self.checkSave(obj)
request_mock.assert_requested(
"post",
"/v1/myupdateables/myid",
{
"baz": "updated",
"other": "newval",
"metadata": {
"size": "m",
"info": "a2",
"height": "",
"score": 4,
},
},
None,
)
def test_save_update_metadata(self, request_mock, obj):
obj.baz = "updated"
obj.other = "newval"
obj.metadata.update({"size": "m", "info": "a2", "score": 4})
self.checkSave(obj)
request_mock.assert_requested(
"post",
"/v1/myupdateables/myid",
{
"baz": "updated",
"other": "newval",
"metadata": {"size": "m", "info": "a2", "score": 4},
},
None,
)
def test_retrieve_and_update_with_stripe_version(self, request_mock, obj):
request_mock.stub_request(
"get", "/v1/myupdateables/foo", {"id": "foo", "bobble": "scrobble"}
)
res = self.MyUpdateable.retrieve("foo", stripe_version="2017-08-15")
request_mock.assert_api_version("2017-08-15")
request_mock.stub_request(
"post",
"/v1/myupdateables/foo",
{"id": "foo", "bobble": "new_scrobble"},
)
res.bobble = "new_scrobble"
res.save()
request_mock.assert_api_version("2017-08-15")
| {
"repo_name": "stripe/stripe-python",
"path": "tests/api_resources/abstract/test_updateable_api_resource.py",
"copies": "1",
"size": "9736",
"license": "mit",
"hash": -6630615364906153000,
"line_mean": 27.0576368876,
"line_max": 79,
"alpha_frac": 0.4725760066,
"autogenerated": false,
"ratio": 3.822536317235964,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4795112323835964,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import pytest
import stripe
TEST_RESOURCE_ID = "acap_123"
class TestCapability(object):
def construct_resource(self):
capability_dict = {
"id": TEST_RESOURCE_ID,
"object": "capability",
"account": "acct_123",
}
return stripe.Capability.construct_from(
capability_dict, stripe.api_key
)
def test_has_instance_url(self, request_mock):
resource = self.construct_resource()
assert (
resource.instance_url()
== "/v1/accounts/acct_123/capabilities/%s" % TEST_RESOURCE_ID
)
def test_is_not_modifiable(self, request_mock):
with pytest.raises(NotImplementedError):
stripe.Capability.modify(TEST_RESOURCE_ID, requested=True)
def test_is_not_retrievable(self, request_mock):
with pytest.raises(NotImplementedError):
stripe.Capability.retrieve(TEST_RESOURCE_ID)
def test_is_saveable(self, request_mock):
resource = self.construct_resource()
resource.requested = True
resource.save()
request_mock.assert_requested(
"post", "/v1/accounts/acct_123/capabilities/%s" % TEST_RESOURCE_ID
)
| {
"repo_name": "stripe/stripe-python",
"path": "tests/api_resources/test_capability.py",
"copies": "1",
"size": "1286",
"license": "mit",
"hash": 1793848686913154800,
"line_mean": 28.9069767442,
"line_max": 78,
"alpha_frac": 0.6220839813,
"autogenerated": false,
"ratio": 3.9691358024691357,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5091219783769135,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import pytest
import sys
import os
import numpy as np
import pandas as pd
import pandas.util.testing as tm
import gzip
import datashape
from datashape import Option, string
from collections import Iterator
from odo.backends.csv import (CSV, append, convert, resource,
csv_to_dataframe, CSV_to_chunks_of_dataframes,
infer_header)
from odo.utils import tmpfile, filetext, filetexts, raises
from odo import (into, append, convert, resource, discover, dshape, Temp,
chunks, odo)
from odo.temp import _Temp
from odo.compatibility import unicode
def test_csv():
with tmpfile('.csv') as fn:
csv = CSV(
fn, dshape='var * {name: string, amount: int}', delimiter=',')
assert csv.dialect['delimiter'] == ','
def test_csv_append():
with tmpfile('.csv') as fn:
csv = CSV(fn, has_header=False)
data = [('Alice', 100), ('Bob', 200)]
append(csv, data)
assert list(convert(Iterator, csv)) == data
with open(fn) as f:
s = f.read()
assert 'Alice' in s
assert '100' in s
def test_pandas_read():
with filetext('Alice,1\nBob,2') as fn:
ds = datashape.dshape('var * {name: string, amount: int}')
csv = CSV(fn)
df = csv_to_dataframe(csv, dshape=ds)
assert isinstance(df, pd.DataFrame)
assert convert(list, df) == [('Alice', 1), ('Bob', 2)]
assert list(df.columns) == ['name', 'amount']
def test_pandas_read_supports_datetimes():
with filetext('Alice,2014-01-02\nBob,2014-01-03') as fn:
ds = datashape.dshape('var * {name: string, when: date}')
csv = CSV(fn)
df = csv_to_dataframe(csv, dshape=ds)
assert isinstance(df, pd.DataFrame)
assert list(df.columns) == ['name', 'when']
assert df.dtypes['when'] == 'M8[ns]'
def test_pandas_read_supports_whitespace_strings():
with filetext('a,b, \n1,2, \n2,3, \n', extension='csv') as fn:
csv = CSV(fn)
ds = discover(csv)
assert ds == datashape.dshape("var * {a: int64, b: int64, '': ?string}")
def test_pandas_read_supports_missing_integers():
with filetext('Alice,1\nBob,') as fn:
ds = datashape.dshape('var * {name: string, val: ?int32}')
csv = CSV(fn)
df = csv_to_dataframe(csv, dshape=ds)
assert isinstance(df, pd.DataFrame)
assert list(df.columns) == ['name', 'val']
assert df.dtypes['val'] == 'f4'
@pytest.mark.xfail(sys.platform == 'win32' and sys.version_info[0] < 3,
reason="Doesn't work on Windows")
def test_pandas_read_supports_gzip():
with filetext('Alice,1\nBob,2', open=gzip.open,
mode='wt', extension='.csv.gz') as fn:
ds = datashape.dshape('var * {name: string, amount: int}')
csv = CSV(fn)
df = csv_to_dataframe(csv, dshape=ds)
assert isinstance(df, pd.DataFrame)
assert convert(list, df) == [('Alice', 1), ('Bob', 2)]
assert list(df.columns) == ['name', 'amount']
def test_pandas_read_supports_read_csv_kwargs():
with filetext('Alice,1\nBob,2') as fn:
ds = datashape.dshape('var * {name: string, amount: int}')
csv = CSV(fn)
df = csv_to_dataframe(csv, dshape=ds, usecols=['name'])
assert isinstance(df, pd.DataFrame)
assert convert(list, df) == [('Alice',), ('Bob',)]
def test_pandas_write():
with tmpfile('.csv') as fn:
ds = datashape.dshape('var * {name: string, amount: int}')
data = [('Alice', 1), ('Bob', 2)]
csv = CSV(fn, has_header=True)
append(csv, data, dshape=ds)
with open(fn) as f:
assert 'name' in f.read()
# Doesn't write header twice
append(csv, data, dshape=ds)
with open(fn) as f:
s = f.read()
assert s.count('name') == 1
def test_pandas_writes_header_by_default():
with tmpfile('.csv') as fn:
ds = datashape.dshape('var * {name: string, amount: int}')
data = [('Alice', 1), ('Bob', 2)]
csv = CSV(fn)
append(csv, data, dshape=ds)
with open(fn) as f:
assert 'name' in f.read()
@pytest.mark.xfail(sys.version_info[0] == 3, reason="Doesn't work on Python 3")
def test_pandas_write_gzip():
with tmpfile('.csv.gz') as fn:
ds = datashape.dshape('var * {name: string, amount: int}')
data = [('Alice', 1), ('Bob', 2)]
csv = CSV(fn, has_header=True)
append(csv, data, dshape=ds)
f = gzip.open(fn)
s = f.read()
assert 'name' in s
assert 'Alice,1' in s
f.close()
def test_pandas_loads_in_datetimes_naively():
with filetext('name,when\nAlice,2014-01-01\nBob,2014-02-02') as fn:
csv = CSV(fn, has_header=True)
ds = datashape.dshape('var * {name: ?string, when: ?datetime}')
assert discover(csv) == ds
df = convert(pd.DataFrame, csv)
assert df.dtypes['when'] == 'M8[ns]'
@pytest.mark.xfail(sys.platform == 'win32' and sys.version_info[0] < 3,
reason="Doesn't work on Windows")
def test_pandas_discover_on_gzipped_files():
with filetext('name,when\nAlice,2014-01-01\nBob,2014-02-02',
open=gzip.open, mode='wt', extension='.csv.gz') as fn:
csv = CSV(fn, has_header=True)
ds = datashape.dshape('var * {name: ?string, when: ?datetime}')
assert discover(csv) == ds
def test_csv_into_list():
with filetext('name,val\nAlice,100\nBob,200', extension='csv') as fn:
L = into(list, fn)
assert L == [('Alice', 100), ('Bob', 200)]
def test_discover_csv_files_without_header():
with filetext('Alice,2014-01-01\nBob,2014-02-02') as fn:
csv = CSV(fn, has_header=False)
df = convert(pd.DataFrame, csv)
assert len(df) == 2
assert 'Alice' not in list(df.columns)
def test_discover_csv_yields_string_on_totally_empty_columns():
expected = dshape('var * {a: int64, b: ?string, c: int64}')
with filetext('a,b,c\n1,,3\n4,,6\n7,,9') as fn:
csv = CSV(fn, has_header=True)
assert discover(csv) == expected
def test_glob():
d = {'accounts1.csv': 'name,when\nAlice,100\nBob,200',
'accounts2.csv': 'name,when\nAlice,300\nBob,400'}
with filetexts(d) as fns:
r = resource('accounts*.csv', has_header=True)
assert convert(list, r) == [('Alice', 100), ('Bob', 200),
('Alice', 300), ('Bob', 400)]
r = resource('*.csv')
assert isinstance(r, chunks(CSV))
def test_pandas_csv_naive_behavior_results_in_columns():
df = pd.DataFrame([[1, 'Alice', 100],
[2, 'Bob', -200],
[3, 'Charlie', 300],
[4, 'Denis', 400],
[5, 'Edith', -500]], columns=['id', 'name', 'amount'])
with tmpfile('.csv') as fn:
into(fn, df)
with open(fn) as f:
assert next(f).strip() == 'id,name,amount'
def test_discover_csv_without_columns():
with filetext('Alice,100\nBob,200', extension='csv') as fn:
csv = CSV(fn)
ds = discover(csv)
assert '100' not in str(ds)
def test_header_argument_set_with_or_without_header():
with filetext('name,val\nAlice,100\nBob,200', extension='csv') as fn:
assert into(list, fn) == [('Alice', 100), ('Bob', 200)]
with filetext('Alice,100\nBob,200', extension='csv') as fn:
assert into(list, fn) == [('Alice', 100), ('Bob', 200)]
def test_first_csv_establishes_consistent_dshape():
d = {'accounts1.csv': 'name,when\nAlice,one\nBob,two',
'accounts2.csv': 'name,when\nAlice,300\nBob,400'}
with filetexts(d) as fns:
result = into(list, 'accounts*.csv')
assert len(result) == 4
assert all(isinstance(val, (str, unicode)) for name, val in result)
def test_discover_csv_with_spaces_in_header():
with filetext(' name, val\nAlice,100\nBob,200', extension='csv') as fn:
ds = discover(CSV(fn, has_header=True))
assert ds.measure.names == ['name', 'val']
def test_header_disagrees_with_dshape():
ds = datashape.dshape('var * {name: string, bal: int64}')
with filetext('name,val\nAlice,100\nBob,200', extension='csv') as fn:
csv = CSV(fn, header=True)
assert convert(list, csv) == [('Alice', 100), ('Bob', 200)]
assert list(convert(pd.DataFrame, csv).columns) == ['name', 'val']
assert list(convert(pd.DataFrame, csv, dshape=ds).columns) == [
'name', 'bal']
def test_header_mix_str_digits():
ds = datashape.dshape('''var * {"On- or Off- Budget": ?string,
"1990": ?string}''')
with filetext('On- or Off- Budget,1990\nOn Budget,-628\nOff budget,"5,962"\n') as fn:
csv = CSV(fn, has_header=True)
df = convert(pd.DataFrame, csv)
assert discover(csv).measure == ds.measure
def test_raise_errors_quickly_on_into_chunks_dataframe():
with filetext('name,val\nAlice,100\nBob,foo', extension='csv') as fn:
ds = datashape.dshape('var * {name: string, val: int}')
csv = CSV(fn, header=True)
assert raises(Exception,
lambda: CSV_to_chunks_of_dataframes(csv, dshape=ds))
def test_unused_datetime_columns():
ds = datashape.dshape('var * {val: string, when: datetime}')
with filetext("val,when\na,2000-01-01\nb,2000-02-02") as fn:
csv = CSV(fn, has_header=True)
assert convert(list, csv_to_dataframe(csv, usecols=['val'],
squeeze=True, dshape=ds)) == ['a', 'b']
def test_empty_dataframe():
with filetext('name,val', extension='csv') as fn:
csv = CSV(fn, has_header=True)
df = convert(pd.DataFrame, csv)
assert isinstance(df, pd.DataFrame)
def test_csv_missing_values():
with filetext('name,val\nAlice,100\nNA,200', extension='csv') as fn:
csv = CSV(fn)
assert discover(csv).measure.dict['name'] == Option(string)
def test_csv_separator_header():
with filetext('a|b|c\n1|2|3\n4|5|6', extension='csv') as fn:
csv = CSV(fn, delimiter='|', has_header=True)
assert convert(list, csv) == [(1, 2, 3), (4, 5, 6)]
df = pd.DataFrame([['Alice', 100],
['Bob', 200],
['Charlie', 300]],
columns=['name', 'balance'])
def test_temp_csv():
csv = into(Temp(CSV)('_test_temp_csv.csv'), df)
assert isinstance(csv, CSV)
assert into(list, csv) == into(list, df)
del csv
import gc
gc.collect()
assert not os.path.exists('_test_temp_csv.csv')
def test_convert_to_csv():
csv = into(Temp(CSV), df)
assert isinstance(csv, CSV)
assert into(list, csv) == into(list, df)
assert isinstance(csv, _Temp)
def test_unicode_column_names():
with filetext(b'f\xc3\xbc,a\n1,2\n3,4', extension='csv', mode='wb') as fn:
df = into(pd.DataFrame, CSV(fn, has_header=True))
expected = pd.DataFrame([(1, 2), (3, 4)],
columns=[b'f\xc3\xbc'.decode('utf8'), u'a'])
tm.assert_frame_equal(df, expected)
def test_more_unicode_column_names():
with filetext(b'foo\xc4\x87,a\n1,2\n3,4', extension='csv',
mode='wb') as fn:
df = into(pd.DataFrame, CSV(fn, has_header=True))
expected = pd.DataFrame([(1, 2), (3, 4)],
columns=[b'foo\xc4\x87'.decode('utf8'), u'a'])
tm.assert_frame_equal(df, expected)
def test_infer_header():
with filetext('name,val\nAlice,100\nNA,200', extension='csv') as fn:
assert infer_header(CSV(fn).path, 100) == True
with filetext('Alice,100\nNA,200', extension='csv') as fn:
assert infer_header(CSV(fn).path, 100) == False
def test_csv_supports_sep():
assert CSV('foo.csv', sep=';').dialect['delimiter'] == ';'
def test_csv_to_compressed_csv():
with tmpfile('.csv') as fn:
with open(fn, 'w') as f:
f.write('a,1\nb,2\nc,3')
with tmpfile('.csv.gz') as gfn:
result = odo(fn, gfn)
assert odo(result, list) == odo(fn, list)
def test_has_header_on_tsv():
with tmpfile('.csv') as fn:
with open(fn, 'wb') as f:
f.write(b'a\tb\n1\t2\n3\t4')
csv = CSV(fn)
assert csv.has_header
def test_header_with_quotes():
csv = CSV(os.path.join(os.path.dirname(__file__), 'encoding.csv'),
encoding='latin1')
expected = dshape("""var * {
D_PROC: ?string,
NUM_SEQ: int64,
COD_TIP_RELAC: ?float64,
COMPL: ?string,
COD_ASSUNTO: int64
}
""")
assert discover(csv) == expected
def test_encoding_is_none():
with tmpfile('.csv') as fn:
with open(fn, 'w') as f:
f.write('a,1\nb,2\nc,3'.encode('utf-8').decode('utf-8'))
assert CSV(fn, encoding=None).encoding == 'utf-8'
def test_discover_with_dotted_names():
with tmpfile('.csv') as fn:
with open(fn, 'w') as f:
f.write('a.b,c.d\n1,2\n3,4')
dshape = discover(resource(fn))
assert dshape == datashape.dshape('var * {"a.b": int64, "c.d": int64}')
assert dshape.measure.names == [u'a.b', u'c.d']
try:
unichr
except NameError:
unichr = chr
def random_multibyte_string(nrows, string_length,
domain=''.join(map(unichr, range(1488, 1515)))):
""" Generate `n` strings of length `string_length` sampled from `domain`.
Parameters
----------
n : int
Number of random strings to generate
string_length : int
Length of each random string
domain : str, optional
The set of characters to sample from. Defaults to Hebrew.
"""
for _ in range(nrows):
yield ''.join(np.random.choice(list(domain), size=string_length))
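# Illustrative sketch (assumed name, not from the upstream suite): a minimal
# check that the helper above yields `nrows` strings of `string_length`
# characters each, drawn from the default Hebrew domain.
def test_random_multibyte_string_shape():
    sample = list(random_multibyte_string(nrows=2, string_length=3))
    assert len(sample) == 2
    assert all(len(s) == 3 for s in sample)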
@pytest.yield_fixture
def multibyte_csv():
header = random_multibyte_string(nrows=2, string_length=3)
single_column = random_multibyte_string(nrows=10, string_length=4)
numbers = np.random.randint(4, size=10)
with tmpfile('.csv') as fn:
with open(fn, 'wb') as f:
f.write((','.join(header) + '\n').encode('utf8'))
f.write('\n'.join(','.join(map(unicode, row))
for row in zip(single_column, numbers)).encode('utf8'))
yield fn
def test_multibyte_encoding_header(multibyte_csv):
c = CSV(multibyte_csv, encoding='utf8', sniff_nbytes=3)
assert c.has_header is None # not enough data to infer header
def test_multibyte_encoding_dialect(multibyte_csv):
c = CSV(multibyte_csv, encoding='utf8', sniff_nbytes=10)
assert c.dialect['delimiter'] == ','
@pytest.mark.parametrize('string_dshape', ['string', 'string[25]'])
def test_string_n_convert(string_dshape):
data = [
'2015-03-13,FOO THE BAR',
'2014-01-29,BAZ THE QUUX'
]
ds = 'var * {k: date, n: %s}' % string_dshape
with tmpfile('.csv') as fn:
with open(fn, 'w') as f:
f.write('\n'.join(data))
csv = CSV(fn, has_header=False)
result = odo(csv, pd.DataFrame, dshape=ds)
assert list(result.columns) == list('kn')
raw = [tuple(x.split(',')) for x in data]
expected = pd.DataFrame(raw, columns=list('kn'))
expected['k'] = pd.to_datetime(expected.k)
tm.assert_frame_equal(result, expected)
def test_globbed_csv_to_chunks_of_dataframe():
header = 'a,b,c\n'
d = {'a-1.csv': header + '1,2,3\n4,5,6\n',
'a-2.csv': header + '7,8,9\n10,11,12\n'}
with filetexts(d):
dfs = list(odo('a-*.csv', chunks(pd.DataFrame)))
assert len(dfs) == 2
columns = 'a', 'b', 'c'
tm.assert_frame_equal(dfs[0],
pd.DataFrame([[1, 2, 3], [4, 5, 6]], columns=columns))
tm.assert_frame_equal(dfs[1],
pd.DataFrame([[7, 8, 9], [10, 11, 12]], columns=columns))
def test_globbed_csv_to_dataframe():
header = 'a,b,c\n'
d = {'a-1.csv': header + '1,2,3\n4,5,6\n',
'a-2.csv': header + '7,8,9\n10,11,12\n'}
with filetexts(d):
df = odo('a-*.csv', pd.DataFrame)
tm.assert_frame_equal(
df,
pd.DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]],
columns=['a', 'b', 'c']),
)
| {
"repo_name": "ContinuumIO/odo",
"path": "odo/backends/tests/test_csv.py",
"copies": "4",
"size": "16450",
"license": "bsd-3-clause",
"hash": 3496978353920562700,
"line_mean": 32.2323232323,
"line_max": 89,
"alpha_frac": 0.5688145897,
"autogenerated": false,
"ratio": 3.167725784710187,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0002788113563992509,
"num_lines": 495
} |
from __future__ import absolute_import, division, print_function
import pytest
import tensorflow as tf
import numpy as np
from lucid.optvis import objectives, param, render, transform
from lucid.modelzoo.vision_models import InceptionV1
np.random.seed(42)
NUM_STEPS = 3
@pytest.fixture
def inceptionv1():
return InceptionV1()
def assert_gradient_ascent(objective, model, batch=None, alpha=False, shape=None):
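    """Optimise a parameterised image against `objective` on `model` for a few
    Adam steps and assert that the objective value increases, i.e. that
    gradient ascent on the objective makes progress."""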
with tf.Graph().as_default() as graph, tf.Session() as sess:
shape = shape or [1, 32, 32, 3]
t_input = param.image(shape[1], h=shape[2], batch=batch, alpha=alpha)
if alpha:
t_input = transform.collapse_alpha_random()(t_input)
model.import_graph(t_input, scope="import", forget_xy_shape=True)
def T(layer):
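            # resolve a layer name to a tensor in the imported graph;
            # "input" and "labels" are special-cased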
if layer == "input":
return t_input
if layer == "labels":
return model.labels
return graph.get_tensor_by_name("import/%s:0" % layer)
loss_t = objective(T)
opt_op = tf.train.AdamOptimizer(0.1).minimize(-loss_t)
tf.global_variables_initializer().run()
        start_value, = sess.run([loss_t])
for _ in range(NUM_STEPS):
_ = sess.run([opt_op])
end_value, = sess.run([loss_t])
print(start_value, end_value)
assert start_value < end_value
def test_neuron(inceptionv1):
objective = objectives.neuron("mixed4a_pre_relu", 42)
assert_gradient_ascent(objective, inceptionv1)
def test_channel(inceptionv1):
objective = objectives.channel("mixed4a_pre_relu", 42)
assert_gradient_ascent(objective, inceptionv1)
@pytest.mark.parametrize("cossim_pow", [0, 1, 2])
def test_direction(cossim_pow, inceptionv1):
mixed_4a_depth = 508
random_direction = np.random.random((mixed_4a_depth))
objective = objectives.direction(
"mixed4a_pre_relu", random_direction, cossim_pow=cossim_pow
)
assert_gradient_ascent(objective, inceptionv1)
def test_direction_neuron(inceptionv1):
mixed_4a_depth = 508
random_direction = np.random.random([mixed_4a_depth])
objective = objectives.direction_neuron("mixed4a_pre_relu", random_direction)
assert_gradient_ascent(objective, inceptionv1)
def test_direction_cossim(inceptionv1):
mixed_4a_depth = 508
random_direction = np.random.random([mixed_4a_depth]).astype(np.float32)
objective = objectives.direction_cossim("mixed4a_pre_relu", random_direction)
assert_gradient_ascent(objective, inceptionv1)
def test_tensor_neuron(inceptionv1):
mixed_4a_depth = 508
random_direction = np.random.random([1,3,3,mixed_4a_depth])
objective = objectives.tensor_direction("mixed4a_pre_relu", random_direction)
assert_gradient_ascent(objective, inceptionv1)
def test_deepdream(inceptionv1):
objective = objectives.deepdream("mixed4a_pre_relu")
assert_gradient_ascent(objective, inceptionv1)
def test_tv(inceptionv1):
objective = objectives.total_variation("mixed4a_pre_relu")
assert_gradient_ascent(objective, inceptionv1)
def test_L1(inceptionv1):
objective = objectives.L1() # on input by default
assert_gradient_ascent(objective, inceptionv1)
def test_L2(inceptionv1):
objective = objectives.L2() # on input by default
assert_gradient_ascent(objective, inceptionv1)
def test_blur_input_each_step(inceptionv1):
objective = objectives.blur_input_each_step()
assert_gradient_ascent(objective, inceptionv1)
# TODO: add test_blur_alpha_each_step
# def test_blur_alpha_each_step(inceptionv1):
# objective = objectives.blur_alpha_each_step()
# assert_gradient_ascent(objective, inceptionv1, alpha=True)
def test_channel_interpolate(inceptionv1):
# TODO: should channel_interpolate fail early if batch is available?
objective = objectives.channel_interpolate(
"mixed4a_pre_relu", 0, "mixed4a_pre_relu", 42
)
assert_gradient_ascent(objective, inceptionv1, batch=5)
def test_penalize_boundary_complexity(inceptionv1):
# TODO: is input shape really unknown at evaluation time?
# TODO: is the sign correctly defined on this objective? It seems I need to invert it.
objective = objectives.penalize_boundary_complexity([1, 32, 32, 3])
assert_gradient_ascent(-1 * objective, inceptionv1)
def test_alignment(inceptionv1):
# TODO: is the sign correctly defined on this objective? It seems I need to invert it.
objective = objectives.alignment("mixed4a_pre_relu")
assert_gradient_ascent(-1 * objective, inceptionv1, batch=2)
def test_diversity(inceptionv1):
# TODO: is the sign correctly defined on this objective? It seems I need to invert it.
objective = objectives.diversity("mixed4a_pre_relu")
assert_gradient_ascent(-1 * objective, inceptionv1, batch=2)
def test_input_diff(inceptionv1):
random_image = np.random.random([1, 32, 32, 3])
objective = objectives.input_diff(random_image)
assert_gradient_ascent(-1 * objective, inceptionv1, batch=2)
@pytest.mark.xfail(reason="Unknown cause of failures; seems find in colab.")
def test_class_logit(inceptionv1):
objective = objectives.class_logit("softmax1", "kit fox")
assert_gradient_ascent(objective, inceptionv1, shape=[1, 224, 224, 3])
| {
"repo_name": "tensorflow/lucid",
"path": "tests/optvis/test_objectives.py",
"copies": "1",
"size": "5254",
"license": "apache-2.0",
"hash": -17442955994175260,
"line_mean": 33.1168831169,
"line_max": 90,
"alpha_frac": 0.7032736962,
"autogenerated": false,
"ratio": 3.2075702075702077,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4410843903770208,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import pytest
from blaze import SQL
from blaze import CSV
from blaze.api.into import into
from blaze.utils import tmpfile
import sqlalchemy
import os
import csv as csv_module
import subprocess
@pytest.yield_fixture
def engine():
with tmpfile('db') as filename:
engine = sqlalchemy.create_engine('sqlite:///' + filename)
yield engine
@pytest.yield_fixture
def csv():
data = [(1, 2), (10, 20), (100, 200)]
with tmpfile('csv') as filename:
csv = CSV(filename, 'w', schema='{a: int32, b: int32}')
csv.extend(data)
csv = CSV(filename, schema='{a: int32, b: int32}')
yield csv
def test_simple_into(engine, csv):
tbl = 'testtable_into_2'
    sql = SQL(engine, tbl, schema=csv.schema)
into(sql, csv, if_exists="replace")
conn = sql.engine.raw_connection()
cursor = conn.cursor()
cursor.execute("SELECT name FROM sqlite_master WHERE type='table' and name='{0}';".format(tbl))
sqlite_tbl_names = cursor.fetchall()
assert sqlite_tbl_names[0][0] == tbl
assert list(sql[:, 'a']) == [1, 10, 100]
assert list(sql[:, 'b']) == [2, 20, 200]
| {
"repo_name": "vitan/blaze",
"path": "blaze/tests/test_sqlite_into.py",
"copies": "1",
"size": "1200",
"license": "bsd-3-clause",
"hash": -8975521517304900000,
"line_mean": 23.4897959184,
"line_max": 99,
"alpha_frac": 0.64,
"autogenerated": false,
"ratio": 3.380281690140845,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4520281690140845,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import pytest
np = pytest.importorskip('numpy')
import dask.array as da
from dask.array.utils import assert_eq
def test_ufunc_meta():
assert da.log.__name__ == 'log'
assert da.log.__doc__.replace(' # doctest: +SKIP', '') == np.log.__doc__
assert da.modf.__name__ == 'modf'
assert da.modf.__doc__.replace(' # doctest: +SKIP', '') == np.modf.__doc__
assert da.frexp.__name__ == 'frexp'
assert da.frexp.__doc__.replace(' # doctest: +SKIP', '') == np.frexp.__doc__
def test_ufunc():
for attr in ['nin', 'nargs', 'nout', 'ntypes', 'identity',
'signature', 'types']:
assert getattr(da.log, attr) == getattr(np.log, attr)
with pytest.raises(AttributeError):
da.log.not_an_attribute
assert repr(da.log) == repr(np.log)
assert 'nin' in dir(da.log)
assert 'outer' in dir(da.log)
binary_ufuncs = ['add', 'arctan2', 'copysign', 'divide', 'equal',
'floor_divide', 'fmax', 'fmin', 'fmod', 'greater',
'greater_equal', 'hypot', 'ldexp', 'less', 'less_equal',
'logaddexp', 'logaddexp2', 'logical_and', 'logical_or',
'logical_xor', 'maximum', 'minimum', 'mod', 'multiply',
'nextafter', 'not_equal', 'power', 'remainder', 'subtract',
'true_divide']
unary_ufuncs = ['absolute', 'arccos', 'arccosh', 'arcsin', 'arcsinh', 'arctan',
'arctanh', 'cbrt', 'ceil', 'conj', 'cos', 'cosh', 'deg2rad',
'degrees', 'exp', 'exp2', 'expm1', 'fabs', 'fix', 'floor',
'i0', 'isfinite', 'isinf', 'isnan', 'log', 'log10', 'log1p',
'log2', 'logical_not', 'nan_to_num', 'negative', 'rad2deg',
'radians', 'reciprocal', 'rint', 'sign', 'signbit', 'sin',
'sinc', 'sinh', 'spacing', 'sqrt', 'square', 'tan', 'tanh',
'trunc']
@pytest.mark.parametrize('ufunc', unary_ufuncs)
def test_unary_ufunc(ufunc):
dafunc = getattr(da, ufunc)
npfunc = getattr(np, ufunc)
arr = np.random.randint(1, 100, size=(20, 20))
darr = da.from_array(arr, 3)
# applying Dask ufunc doesn't trigger computation
assert isinstance(dafunc(darr), da.Array)
assert_eq(dafunc(darr), npfunc(arr), equal_nan=True)
# applying NumPy ufunc triggers computation
assert isinstance(npfunc(darr), np.ndarray)
assert_eq(npfunc(darr), npfunc(arr), equal_nan=True)
# applying Dask ufunc to normal ndarray triggers computation
assert isinstance(dafunc(arr), np.ndarray)
assert_eq(dafunc(arr), npfunc(arr), equal_nan=True)
@pytest.mark.parametrize('ufunc', binary_ufuncs)
def test_binary_ufunc(ufunc):
dafunc = getattr(da, ufunc)
npfunc = getattr(np, ufunc)
arr1 = np.random.randint(1, 100, size=(20, 20))
darr1 = da.from_array(arr1, 3)
arr2 = np.random.randint(1, 100, size=(20, 20))
darr2 = da.from_array(arr2, 3)
# applying Dask ufunc doesn't trigger computation
assert isinstance(dafunc(darr1, darr2), da.Array)
assert_eq(dafunc(darr1, darr2), npfunc(arr1, arr2))
# applying NumPy ufunc triggers computation
assert isinstance(npfunc(darr1, darr2), np.ndarray)
assert_eq(npfunc(darr1, darr2), npfunc(arr1, arr2))
# applying Dask ufunc to normal ndarray triggers computation
assert isinstance(dafunc(arr1, arr2), np.ndarray)
assert_eq(dafunc(arr1, arr2), npfunc(arr1, arr2))
# with scalar
assert isinstance(dafunc(darr1, 10), da.Array)
assert_eq(dafunc(darr1, 10), npfunc(arr1, 10))
assert isinstance(dafunc(10, darr1), da.Array)
assert_eq(dafunc(10, darr1), npfunc(10, arr1))
assert isinstance(dafunc(arr1, 10), np.ndarray)
assert_eq(dafunc(arr1, 10), npfunc(arr1, 10))
assert isinstance(dafunc(10, arr1), np.ndarray)
assert_eq(dafunc(10, arr1), npfunc(10, arr1))
def test_ufunc_outer():
arr1 = np.random.randint(1, 100, size=20)
darr1 = da.from_array(arr1, 3)
arr2 = np.random.randint(1, 100, size=(10, 3))
darr2 = da.from_array(arr2, 3)
# Check output types
assert isinstance(da.add.outer(darr1, darr2), da.Array)
assert isinstance(da.add.outer(arr1, darr2), da.Array)
assert isinstance(da.add.outer(darr1, arr2), da.Array)
assert isinstance(da.add.outer(arr1, arr2), np.ndarray)
# Check mix of dimensions, dtypes, and numpy/dask/object
cases = [((darr1, darr2), (arr1, arr2)),
((darr2, darr1), (arr2, arr1)),
((darr2, darr1.astype('f8')), (arr2, arr1.astype('f8'))),
((darr1, arr2), (arr1, arr2)),
((darr1, 1), (arr1, 1)),
((1, darr2), (1, arr2)),
((1.5, darr2), (1.5, arr2)),
(([1, 2, 3], darr2), ([1, 2, 3], arr2)),
((darr1.sum(), darr2), (arr1.sum(), arr2)),
((np.array(1), darr2), (np.array(1), arr2))]
for (dA, dB), (A, B) in cases:
assert_eq(da.add.outer(dA, dB), np.add.outer(A, B))
# Check dtype kwarg works
assert_eq(da.add.outer(darr1, darr2, dtype='f8'),
np.add.outer(arr1, arr2, dtype='f8'))
with pytest.raises(ValueError):
da.add.outer(darr1, darr2, out=arr1)
with pytest.raises(ValueError):
da.sin.outer(darr1, darr2)
@pytest.mark.parametrize('ufunc', ['isreal', 'iscomplex', 'real', 'imag'])
def test_complex(ufunc):
dafunc = getattr(da, ufunc)
npfunc = getattr(np, ufunc)
real = np.random.randint(1, 100, size=(20, 20))
imag = np.random.randint(1, 100, size=(20, 20)) * 1j
comp = real + imag
dareal = da.from_array(real, 3)
daimag = da.from_array(imag, 3)
dacomp = da.from_array(comp, 3)
assert_eq(dacomp.real, comp.real)
assert_eq(dacomp.imag, comp.imag)
assert_eq(dacomp.conj(), comp.conj())
for darr, arr in [(dacomp, comp), (dareal, real), (daimag, imag)]:
# applying Dask ufunc doesn't trigger computation
assert isinstance(dafunc(darr), da.Array)
assert_eq(dafunc(darr), npfunc(arr))
# applying NumPy ufunc triggers computation
assert isinstance(npfunc(darr), np.ndarray)
assert_eq(npfunc(darr), npfunc(arr))
# applying Dask ufunc to normal ndarray triggers computation
assert isinstance(dafunc(arr), np.ndarray)
assert_eq(dafunc(arr), npfunc(arr))
@pytest.mark.parametrize('ufunc', ['frexp', 'modf'])
def test_ufunc_2results(ufunc):
dafunc = getattr(da, ufunc)
npfunc = getattr(np, ufunc)
arr = np.random.randint(1, 100, size=(20, 20))
darr = da.from_array(arr, 3)
# applying Dask ufunc doesn't trigger computation
res1, res2 = dafunc(darr)
assert isinstance(res1, da.Array)
assert isinstance(res2, da.Array)
exp1, exp2 = npfunc(arr)
assert_eq(res1, exp1)
assert_eq(res2, exp2)
# applying NumPy ufunc triggers computation
res1, res2 = npfunc(darr)
assert isinstance(res1, np.ndarray)
assert isinstance(res2, np.ndarray)
exp1, exp2 = npfunc(arr)
assert_eq(res1, exp1)
assert_eq(res2, exp2)
# applying Dask ufunc to normal ndarray triggers computation
res1, res2 = npfunc(darr)
assert isinstance(res1, np.ndarray)
assert isinstance(res2, np.ndarray)
exp1, exp2 = npfunc(arr)
assert_eq(res1, exp1)
assert_eq(res2, exp2)
def test_clip():
x = np.random.normal(0, 10, size=(10, 10))
d = da.from_array(x, chunks=(3, 4))
assert_eq(x.clip(5), d.clip(5))
assert_eq(x.clip(1, 5), d.clip(1, 5))
assert_eq(x.clip(min=5), d.clip(min=5))
assert_eq(x.clip(max=5), d.clip(max=5))
assert_eq(x.clip(max=1, min=5), d.clip(max=1, min=5))
assert_eq(x.clip(min=1, max=5), d.clip(min=1, max=5))
def test_angle():
real = np.random.randint(1, 100, size=(20, 20))
imag = np.random.randint(1, 100, size=(20, 20)) * 1j
comp = real + imag
dacomp = da.from_array(comp, 3)
assert_eq(da.angle(dacomp), np.angle(comp))
assert_eq(da.angle(dacomp, deg=True), np.angle(comp, deg=True))
assert isinstance(da.angle(comp), np.ndarray)
assert_eq(da.angle(comp), np.angle(comp))
| {
"repo_name": "mraspaud/dask",
"path": "dask/array/tests/test_ufunc.py",
"copies": "1",
"size": "8152",
"license": "bsd-3-clause",
"hash": 6625311100792822000,
"line_mean": 33.2521008403,
"line_max": 83,
"alpha_frac": 0.6079489696,
"autogenerated": false,
"ratio": 2.978443551333577,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9084472193703248,
"avg_score": 0.00038406544606585597,
"num_lines": 238
} |
from __future__ import absolute_import, division, print_function
import pytest
psycopg2 = pytest.importorskip('psycopg2')
import subprocess
ps = subprocess.Popen("ps aux | grep postgres",shell=True, stdout=subprocess.PIPE)
output = ps.stdout.read()
pytestmark = pytest.mark.skipif(len(output.split('\n')) < 6, reason="No Postgres Installation")
import sqlalchemy
from sqlalchemy import Table, Column, Integer
import os
import csv as csv_module
url = 'postgresql://localhost/postgres'
file_name = 'test.csv'
# @pytest.fixture(scope='module')
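# pytest calls these module-level hooks around every test below: setup_function
# writes a small CSV file and teardown_function removes it.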
def setup_function(function):
data = [(1, 2), (10, 20), (100, 200)]
with open(file_name, 'w') as f:
csv_writer = csv_module.writer(f)
for row in data:
csv_writer.writerow(row)
def teardown_function(function):
os.remove(file_name)
engine = sqlalchemy.create_engine(url)
metadata = sqlalchemy.MetaData()
metadata.reflect(engine)
# for t in metadata.tables:
# if 'travisci' in t:
# metadata.tables[t].drop(engine)
def test_csv_postgres_load():
tbl = 'travisci_postgres'
engine = sqlalchemy.create_engine(url)
conn = engine.raw_connection()
m = sqlalchemy.MetaData()
t = Table(tbl, m,
Column('a', Integer),
Column('c', Integer)
)
m.create_all(engine)
cursor = conn.cursor()
full_path = os.path.abspath(file_name)
load = '''copy {} from '{}'(FORMAT CSV, DELIMITER ',', NULL '');'''.format(tbl, full_path)
cursor.execute(load)
conn.commit()
| {
"repo_name": "quasiben/TravisTesting",
"path": "test_postgres.py",
"copies": "1",
"size": "1530",
"license": "bsd-3-clause",
"hash": -6500757372008466000,
"line_mean": 25.8421052632,
"line_max": 95,
"alpha_frac": 0.6549019608,
"autogenerated": false,
"ratio": 3.477272727272727,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4632174688072727,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import pytest
pymysql = pytest.importorskip('pymysql')
from decimal import Decimal
from datashape import var, DataShape, Record, dshape
import itertools
from odo.backends.csv import CSV
from odo import resource, odo
import sqlalchemy
import sqlalchemy as sa
import os
import sys
import csv as csv_module
import getpass
from odo import drop, discover
from odo.utils import tmpfile
pytestmark = pytest.mark.skipif(sys.platform == 'win32',
reason='not well tested on win32 mysql')
username = getpass.getuser()
url = 'mysql+pymysql://{0}@localhost:3306/test'.format(username)
def create_csv(data, file_name):
with open(file_name, 'w') as f:
csv_writer = csv_module.writer(f)
for row in data:
csv_writer.writerow(row)
data = [(1, 2), (10, 20), (100, 200)]
data_floats = [(1.02, 2.02), (102.02, 202.02), (1002.02, 2002.02)]
@pytest.yield_fixture
def csv():
with tmpfile('.csv') as fn:
create_csv(data, fn)
yield CSV(fn)
@pytest.yield_fixture
def fcsv():
with tmpfile('.csv') as fn:
create_csv(data_floats, fn)
yield CSV(fn, columns=list('ab'))
names = ('tbl%d' % i for i in itertools.count())
@pytest.fixture
def name():
return next(names)
@pytest.fixture(scope='module')
def engine():
return sqlalchemy.create_engine(url)
@pytest.yield_fixture
def sql(engine, csv, name):
dshape = discover(csv)
dshape = DataShape(var,
Record([(n, typ)
for n, typ in zip('ab', dshape.measure.types)]))
try:
t = resource('%s::%s' % (url, name), dshape=dshape)
except sqlalchemy.exc.OperationalError as e:
pytest.skip(str(e))
else:
try:
yield t
finally:
drop(t)
@pytest.yield_fixture
def fsql(engine, fcsv, name):
dshape = discover(fcsv)
dshape = DataShape(var,
Record([(n, typ)
for n, typ in zip('ab', dshape.measure.types)]))
try:
t = resource('%s::%s' % (url, name), dshape=dshape)
except sqlalchemy.exc.OperationalError as e:
pytest.skip(str(e))
else:
try:
yield t
finally:
drop(t)
@pytest.fixture
def dcsv():
this_dir = os.path.dirname(__file__)
file_name = os.path.join(this_dir, 'dummydata.csv')
dshape = """var * {
Name: string,
RegistrationDate: date,
ZipCode: int64,
Consts: float64
}"""
return CSV(file_name, dshape=dshape)
@pytest.yield_fixture
def dsql(engine, dcsv, name):
try:
t = resource('%s::%s' % (url, name), dshape=discover(dcsv))
except sqlalchemy.exc.OperationalError as e:
pytest.skip(str(e))
else:
try:
yield t
finally:
drop(t)
@pytest.yield_fixture
def decimal_sql(engine, name):
try:
t = resource('%s::%s' % (url, name),
dshape="var * {a: ?decimal[10, 3], b: decimal[11, 2]}")
except sa.exc.OperationalError as e:
pytest.skip(str(e))
else:
try:
yield t
finally:
drop(t)
def test_csv_mysql_load(sql, csv):
engine = sql.bind
conn = engine.raw_connection()
cursor = conn.cursor()
full_path = os.path.abspath(csv.path)
load = '''LOAD DATA INFILE '{0}' INTO TABLE {1} FIELDS TERMINATED BY ','
lines terminated by '\n'
'''.format(full_path, sql.name)
cursor.execute(load)
conn.commit()
def test_simple_into(sql, csv):
odo(csv, sql, if_exists="replace")
assert odo(sql, list) == [(1, 2), (10, 20), (100, 200)]
def test_append(sql, csv):
odo(csv, sql, if_exists="replace")
assert odo(sql, list) == [(1, 2), (10, 20), (100, 200)]
odo(csv, sql, if_exists="append")
assert odo(sql, list) == [(1, 2), (10, 20), (100, 200),
(1, 2), (10, 20), (100, 200)]
def test_simple_float_into(fsql, fcsv):
odo(fcsv, fsql, if_exists="replace")
assert odo(fsql, list) == [(1.02, 2.02),
(102.02, 202.02),
(1002.02, 2002.02)]
def test_tryexcept_into(sql, csv):
# uses multi-byte character and fails over to using sql.extend()
odo(csv, sql, if_exists="replace", QUOTE="alpha", FORMAT="csv")
assert odo(sql, list) == [(1, 2), (10, 20), (100, 200)]
@pytest.mark.xfail(raises=KeyError)
def test_failing_argument(sql, csv):
odo(csv, sql, if_exists="replace", skipinitialspace="alpha")
def test_no_header_no_columns(sql, csv):
odo(csv, sql, if_exists="replace")
assert odo(sql, list) == [(1, 2), (10, 20), (100, 200)]
@pytest.mark.xfail
def test_complex_into(dsql, dcsv):
# data from: http://dummydata.me/generate
odo(dcsv, dsql, if_exists="replace")
assert odo(dsql, list) == odo(dcsv, list)
def test_sql_to_csv(sql, csv):
sql = odo(csv, sql)
with tmpfile('.csv') as fn:
csv = odo(sql, fn)
assert odo(csv, list) == data
# explicitly test that we do NOT preserve the header here
assert discover(csv).measure.names != discover(sql).measure.names
def test_sql_select_to_csv(sql, csv):
sql = odo(csv, sql)
query = sa.select([sql.c.a])
with tmpfile('.csv') as fn:
csv = odo(query, fn)
assert odo(csv, list) == [(x,) for x, _ in data]
def test_csv_output_does_not_preserve_header(sql, csv):
sql = odo(csv, sql)
expected = "1,2\n10,20\n100,200\n"
with tmpfile('.csv') as fn:
csv = odo(sql, fn)
with open(csv.path, 'rt') as f:
result = f.read()
assert result == expected
@pytest.mark.xfail(raises=AssertionError,
reason="Remove when all databases are being tested at once")
def test_different_encoding(name):
encoding = 'latin1'
try:
sql = odo(os.path.join(os.path.dirname(__file__), 'encoding.csv'),
url + '::%s' % name,
encoding=encoding)
except sa.exc.OperationalError as e:
pytest.skip(str(e))
else:
try:
result = odo(sql, list)
expected = [(u'1958.001.500131-1A', 1, None, u'', 899),
(u'1958.001.500156-6', 1, None, u'', 899),
(u'1958.001.500162-1', 1, None, u'', 899),
(u'1958.001.500204-2', 1, None, u'', 899),
(u'1958.001.500204-2A', 1, None, u'', 899),
(u'1958.001.500204-2B', 1, None, u'', 899),
(u'1958.001.500223-6', 1, None, u'', 9610),
(u'1958.001.500233-9', 1, None, u'', 4703),
(u'1909.017.000018-3', 1, 30.0, u'sumaria', 899)]
assert result == expected
finally:
drop(sql)
def test_decimal(decimal_sql):
t = sa.Table(decimal_sql.name, sa.MetaData(decimal_sql.bind), autoload=True)
assert discover(t) == dshape(
"var * {a: ?decimal[10, 3], b: decimal[11, 2]}"
)
assert isinstance(t.c.a.type, sa.Numeric)
assert isinstance(t.c.b.type, sa.Numeric)
| {
"repo_name": "cowlicks/odo",
"path": "odo/backends/tests/test_mysql.py",
"copies": "1",
"size": "7217",
"license": "bsd-3-clause",
"hash": -5976768823582634000,
"line_mean": 26.5458015267,
"line_max": 80,
"alpha_frac": 0.56089788,
"autogenerated": false,
"ratio": 3.203284509542832,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4264182389542832,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import pytest
pytest.importorskip('dask')
from toolz import merge
from odo.backends.dask import append
from dask.array.core import insert_to_ooc, Array
from dask import core
from odo import convert, into, odo
from odo.utils import tmpfile
import numpy as np
def eq(a, b):
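    """Equality check that works for scalars and numpy arrays alike; array
    comparisons are reduced with ``.all()``."""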
c = a == b
if isinstance(c, np.ndarray):
c = c.all()
return c
def test_convert():
x = np.arange(600).reshape((20, 30))
d = convert(Array, x, chunks=(4, 5))
assert isinstance(d, Array)
def test_convert_to_numpy_array():
x = np.arange(600).reshape((20, 30))
d = convert(Array, x, chunks=(4, 5))
x2 = convert(np.ndarray, d)
assert eq(x, x2)
def test_append_to_array():
bcolz = pytest.importorskip('bcolz')
x = np.arange(600).reshape((20, 30))
a = into(Array, x, chunks=(4, 5))
b = bcolz.zeros(shape=(0, 30), dtype=x.dtype)
append(b, a)
assert eq(b[:], x)
with tmpfile('.hdf5') as fn:
h = odo(a, fn + '::/data')
assert eq(h[:], x)
h.file.close()
def test_into_inplace():
bcolz = pytest.importorskip('bcolz')
x = np.arange(600).reshape((20, 30))
a = into(Array, x, chunks=(4, 5))
b = bcolz.zeros(shape=(20, 30), dtype=x.dtype)
append(b, a, inplace=True)
assert eq(b[:], x)
def test_insert_to_ooc():
x = np.arange(600).reshape((20, 30))
y = np.empty(shape=x.shape, dtype=x.dtype)
a = convert(Array, x, chunks=(4, 5))
dsk = insert_to_ooc(y, a)
core.get(merge(dsk, a.dask), list(dsk.keys()))
assert eq(y, x)
def test_array_interface():
x = np.arange(600).reshape((20, 30))
d = convert(Array, x, chunks=(4, 5))
assert eq(x, np.array(d))
| {
"repo_name": "ContinuumIO/odo",
"path": "odo/backends/tests/test_dask_array.py",
"copies": "9",
"size": "1748",
"license": "bsd-3-clause",
"hash": -6406182462798293000,
"line_mean": 22,
"line_max": 64,
"alpha_frac": 0.6035469108,
"autogenerated": false,
"ratio": 2.814814814814815,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0032775360956009943,
"num_lines": 76
} |
from __future__ import absolute_import, division, print_function
import pytest
sa = pytest.importorskip('sqlalchemy')
import itertools
from distutils.version import LooseVersion
import datashape
from odo import into, resource, discover
from pandas import DataFrame
from toolz import unique
from blaze.compute.sql import compute, select, lower_column, compute_up
from blaze.expr import (
symbol, transform, summary, by, sin, join,
floor, cos, merge, nunique, mean, sum, count, exp
)
from blaze.compatibility import xfail
from blaze.utils import tmpfile, example, normalize
def computefull(t, s):
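    """Compute ``t`` against ``s`` and wrap the result in a full SELECT so the
    generated SQL can be compared as a string."""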
return select(compute(t, s))
names = ('tbl%d' % i for i in itertools.count())
@pytest.fixture(scope='module')
def data():
# make the engine
engine = sa.create_engine('sqlite:///:memory:')
metadata = sa.MetaData(engine)
# name table
name = sa.Table('name', metadata,
sa.Column('id', sa.Integer),
sa.Column('name', sa.String),
)
name.create()
# city table
city = sa.Table('city', metadata,
sa.Column('id', sa.Integer),
sa.Column('city', sa.String),
sa.Column('country', sa.String),
)
city.create()
s = symbol('s', discover(engine))
return {'engine': engine, 'metadata': metadata, 'name': name, 'city': city,
's': s}
t = symbol('t', 'var * {name: string, amount: int, id: int}')
nt = symbol('t', 'var * {name: ?string, amount: float64, id: int}')
metadata = sa.MetaData()
s = sa.Table('accounts', metadata,
sa.Column('name', sa.String),
sa.Column('amount', sa.Integer),
sa.Column('id', sa.Integer, primary_key=True))
tdate = symbol('t',
"""var * {
name: string,
amount: int,
id: int,
occurred_on: datetime
}""")
ns = sa.Table('nullaccounts', metadata,
sa.Column('name', sa.String, nullable=True),
sa.Column('amount', sa.REAL),
sa.Column('id', sa.Integer, primary_key=True),
)
sdate = sa.Table('accdate', metadata,
sa.Column('name', sa.String),
sa.Column('amount', sa.Integer),
sa.Column('id', sa.Integer, primary_key=True),
sa.Column('occurred_on', sa.DateTime))
tbig = symbol('tbig',
'var * {name: string, sex: string[1], amount: int, id: int}')
sbig = sa.Table('accountsbig', metadata,
sa.Column('name', sa.String),
sa.Column('sex', sa.String),
sa.Column('amount', sa.Integer),
sa.Column('id', sa.Integer, primary_key=True))
def test_table():
result = str(computefull(t, s))
expected = """
SELECT accounts.name, accounts.amount, accounts.id
FROM accounts
""".strip()
assert normalize(result) == normalize(expected)
def test_projection():
print(compute(t[['name', 'amount']], s))
assert str(compute(t[['name', 'amount']], s)) == \
str(sa.select([s.c.name, s.c.amount]))
def test_eq():
assert str(compute(t['amount'] == 100, s, post_compute=False)) == \
str(s.c.amount == 100)
def test_eq_unicode():
assert str(compute(t['name'] == u'Alice', s, post_compute=False)) == \
str(s.c.name == u'Alice')
def test_selection():
assert str(compute(t[t['amount'] == 0], s)) == \
str(sa.select([s]).where(s.c.amount == 0))
assert str(compute(t[t['amount'] > 150], s)) == \
str(sa.select([s]).where(s.c.amount > 150))
def test_arithmetic():
assert str(compute(t['amount'] + t['id'], s)) == \
str(sa.select([s.c.amount + s.c.id]))
assert str(compute(t['amount'] + t['id'], s, post_compute=False)) == \
str(s.c.amount + s.c.id)
assert str(compute(t['amount'] * t['id'], s, post_compute=False)) == \
str(s.c.amount * s.c.id)
assert str(compute(t['amount'] * 2, s, post_compute=False)) == \
str(s.c.amount * 2)
assert str(compute(2 * t['amount'], s, post_compute=False)) == \
str(2 * s.c.amount)
assert (str(compute(~(t['amount'] > 10), s, post_compute=False)) ==
"accounts.amount <= :amount_1")
assert str(compute(t['amount'] + t['id'] * 2, s)) == \
str(sa.select([s.c.amount + s.c.id * 2]))
def test_join():
metadata = sa.MetaData()
lhs = sa.Table('amounts', metadata,
sa.Column('name', sa.String),
sa.Column('amount', sa.Integer))
rhs = sa.Table('ids', metadata,
sa.Column('name', sa.String),
sa.Column('id', sa.Integer))
expected = lhs.join(rhs, lhs.c.name == rhs.c.name)
expected = select(list(unique(expected.columns, key=lambda c:
c.name))).select_from(expected)
L = symbol('L', 'var * {name: string, amount: int}')
R = symbol('R', 'var * {name: string, id: int}')
joined = join(L, R, 'name')
result = compute(joined, {L: lhs, R: rhs})
assert normalize(str(result)) == normalize("""
SELECT amounts.name, amounts.amount, ids.id
FROM amounts JOIN ids ON amounts.name = ids.name""")
assert str(select(result)) == str(select(expected))
# Schemas match
assert list(result.c.keys()) == list(joined.fields)
# test sort on join
result = compute(joined.sort('amount'), {L: lhs, R: rhs})
assert normalize(str(result)) == normalize("""
select
anon_1.name,
anon_1.amount,
anon_1.id
from (select
amounts.name as name,
amounts.amount as amount,
ids.id as id
from
amounts
join
ids
on
amounts.name = ids.name) as anon_1
order by
anon_1.amount asc""")
def test_clean_complex_join():
metadata = sa.MetaData()
lhs = sa.Table('amounts', metadata,
sa.Column('name', sa.String),
sa.Column('amount', sa.Integer))
rhs = sa.Table('ids', metadata,
sa.Column('name', sa.String),
sa.Column('id', sa.Integer))
L = symbol('L', 'var * {name: string, amount: int}')
R = symbol('R', 'var * {name: string, id: int}')
joined = join(L[L.amount > 0], R, 'name')
result = compute(joined, {L: lhs, R: rhs})
expected1 = """
SELECT amounts.name, amounts.amount, ids.id
FROM amounts JOIN ids ON amounts.name = ids.name
WHERE amounts.amount > :amount_1"""
expected2 = """
SELECT alias.name, alias.amount, ids.id
FROM (SELECT amounts.name AS name, amounts.amount AS amount
FROM amounts
WHERE amounts.amount > :amount_1) AS alias
JOIN ids ON alias.name = ids.name"""
assert (normalize(str(result)) == normalize(expected1) or
normalize(str(result)) == normalize(expected2))
def test_multi_column_join():
metadata = sa.MetaData()
lhs = sa.Table('aaa', metadata,
sa.Column('x', sa.Integer),
sa.Column('y', sa.Integer),
sa.Column('z', sa.Integer))
rhs = sa.Table('bbb', metadata,
sa.Column('w', sa.Integer),
sa.Column('x', sa.Integer),
sa.Column('y', sa.Integer))
L = symbol('L', 'var * {x: int, y: int, z: int}')
R = symbol('R', 'var * {w: int, x: int, y: int}')
joined = join(L, R, ['x', 'y'])
expected = lhs.join(rhs, (lhs.c.x == rhs.c.x)
& (lhs.c.y == rhs.c.y))
expected = select(list(unique(expected.columns, key=lambda c:
c.name))).select_from(expected)
result = compute(joined, {L: lhs, R: rhs})
assert str(result) == str(expected)
assert str(select(result)) == str(select(expected))
# Schemas match
print(result.c.keys())
print(joined.fields)
assert list(result.c.keys()) == list(joined.fields)
def test_unary_op():
assert str(compute(exp(t['amount']), s, post_compute=False)) == \
str(sa.func.exp(s.c.amount))
assert str(compute(-t['amount'], s, post_compute=False)) == \
str(-s.c.amount)
@pytest.mark.parametrize('unbiased', [True, False])
def test_std(unbiased):
assert str(compute(t.amount.std(unbiased=unbiased), s, post_compute=False)) == \
str(getattr(sa.func,
'stddev_%s' % ('samp' if unbiased else 'pop'))(s.c.amount))
@pytest.mark.parametrize('unbiased', [True, False])
def test_var(unbiased):
assert str(compute(t.amount.var(unbiased=unbiased), s, post_compute=False)) == \
str(getattr(sa.func,
'var_%s' % ('samp' if unbiased else 'pop'))(s.c.amount))
def test_reductions():
assert str(compute(sum(t['amount']), s, post_compute=False)) == \
str(sa.sql.functions.sum(s.c.amount))
assert str(compute(mean(t['amount']), s, post_compute=False)) == \
str(sa.sql.func.avg(s.c.amount))
assert str(compute(count(t['amount']), s, post_compute=False)) == \
str(sa.sql.func.count(s.c.amount))
assert 'amount_sum' == compute(
sum(t['amount']), s, post_compute=False).name
def test_reduction_with_invalid_axis_argument():
with pytest.raises(ValueError):
compute(t.amount.mean(axis=1))
with pytest.raises(ValueError):
compute(t.count(axis=1))
with pytest.raises(ValueError):
compute(t[['amount', 'id']].count(axis=1))
def test_nelements():
rhs = str(compute(t.count(), s))
assert str(compute(t.nelements(), s)) == rhs
assert str(compute(t.nelements(axis=None), s)) == rhs
assert str(compute(t.nelements(axis=0), s)) == rhs
assert str(compute(t.nelements(axis=(0,)), s)) == rhs
@pytest.mark.xfail(raises=Exception, reason="We don't support axis=1 for"
" Record datashapes")
def test_nelements_axis_1():
assert compute(t.nelements(axis=1), s) == len(s.columns)
def test_count_on_table():
result = compute(t.count(), s)
assert normalize(str(result)) == normalize("""
SELECT count(accounts.id) as count_1
FROM accounts""")
result = compute(t[t.amount > 0].count(), s)
assert (
normalize(str(result)) == normalize("""
SELECT count(accounts.id) as t_count
FROM accounts
WHERE accounts.amount > :amount_1""")
or
normalize(str(result)) == normalize("""
SELECT count(alias.id) as t_count
FROM (SELECT accounts.name AS name, accounts.amount AS amount, accounts.id AS id
FROM accounts
WHERE accounts.amount > :amount_1) as alias_2"""))
def test_distinct():
result = str(compute(t['amount'].distinct(), s, post_compute=False))
assert 'distinct' in result.lower()
assert 'amount' in result.lower()
print(result)
assert result == str(sa.distinct(s.c.amount))
def test_distinct_multiple_columns():
assert normalize(str(compute(t.distinct(), s))) == normalize("""
SELECT DISTINCT accounts.name, accounts.amount, accounts.id
FROM accounts""")
def test_nunique():
result = str(computefull(nunique(t['amount']), s))
print(result)
assert 'distinct' in result.lower()
assert 'count' in result.lower()
assert 'amount' in result.lower()
def test_nunique_table():
result = normalize(str(computefull(t.nunique(), s)))
expected = normalize("""SELECT count(alias.id) AS tbl_row_count
FROM (SELECT DISTINCT accounts.name AS name, accounts.amount AS amount, accounts.id AS id
FROM accounts) as alias""")
assert result == expected
@xfail(reason="Fails because SQLAlchemy doesn't seem to know binary reductions")
def test_binary_reductions():
assert str(compute(any(t['amount'] > 150), s)) == \
str(sa.sql.functions.any(s.c.amount > 150))
def test_by():
expr = by(t['name'], total=t['amount'].sum())
result = compute(expr, s)
expected = sa.select([s.c.name,
sa.sql.functions.sum(s.c.amount).label('total')]
).group_by(s.c.name)
assert str(result) == str(expected)
def test_by_head():
t2 = t.head(100)
expr = by(t2['name'], total=t2['amount'].sum())
result = compute(expr, s)
# s2 = select(s).limit(100)
# expected = sa.select([s2.c.name,
# sa.sql.functions.sum(s2.c.amount).label('amount_sum')]
# ).group_by(s2.c.name)
expected = """
SELECT alias.name, sum(alias.amount) as total
FROM (SELECT accounts.name AS name, accounts.amount AS amount, accounts.id AS ID
FROM accounts
LIMIT :param_1) as alias
GROUP BY alias.name"""
expected = """
SELECT accounts.name, sum(accounts.amount) as total
FROM accounts
GROUP by accounts.name
LIMIT :param_1"""
assert normalize(str(result)) == normalize(str(expected))
def test_by_two():
expr = by(tbig[['name', 'sex']], total=tbig['amount'].sum())
result = compute(expr, sbig)
expected = (sa.select([sbig.c.name,
sbig.c.sex,
sa.sql.functions.sum(sbig.c.amount).label('total')])
.group_by(sbig.c.name, sbig.c.sex))
assert str(result) == str(expected)
def test_by_three():
result = compute(by(tbig[['name', 'sex']],
total=(tbig['id'] + tbig['amount']).sum()),
sbig)
assert normalize(str(result)) == normalize("""
SELECT accountsbig.name,
accountsbig.sex,
sum(accountsbig.id + accountsbig.amount) AS total
FROM accountsbig GROUP BY accountsbig.name, accountsbig.sex
""")
def test_by_summary_clean():
expr = by(t.name, min=t.amount.min(), max=t.amount.max())
result = compute(expr, s)
expected = """
SELECT accounts.name, max(accounts.amount) AS max, min(accounts.amount) AS min
FROM accounts
GROUP BY accounts.name
"""
assert normalize(str(result)) == normalize(expected)
def test_by_summary_single_column():
expr = by(t.name, n=t.name.count(), biggest=t.name.max())
result = compute(expr, s)
expected = """
SELECT accounts.name, max(accounts.name) AS biggest, count(accounts.name) AS n
FROM accounts
GROUP BY accounts.name
"""
assert normalize(str(result)) == normalize(expected)
def test_join_projection():
metadata = sa.MetaData()
lhs = sa.Table('amounts', metadata,
sa.Column('name', sa.String),
sa.Column('amount', sa.Integer))
rhs = sa.Table('ids', metadata,
sa.Column('name', sa.String),
sa.Column('id', sa.Integer))
L = symbol('L', 'var * {name: string, amount: int}')
R = symbol('R', 'var * {name: string, id: int}')
want = join(L, R, 'name')[['amount', 'id']]
result = compute(want, {L: lhs, R: rhs})
print(result)
assert 'join' in str(result).lower()
assert result.c.keys() == ['amount', 'id']
assert 'amounts.name = ids.name' in str(result)
def test_sort():
assert str(compute(t.sort('amount'), s)) == \
str(select(s).order_by(sa.asc(s.c.amount)))
assert str(compute(t.sort('amount', ascending=False), s)) == \
str(select(s).order_by(sa.desc(s.c.amount)))
def test_multicolumn_sort():
assert str(compute(t.sort(['amount', 'id']), s)) == \
str(select(s).order_by(sa.asc(s.c.amount), sa.asc(s.c.id)))
assert str(compute(t.sort(['amount', 'id'], ascending=False), s)) == \
str(select(s).order_by(sa.desc(s.c.amount), sa.desc(s.c.id)))
def test_sort_on_distinct():
assert normalize(str(compute(t.amount.sort(), s))) == normalize("""
SELECT accounts.amount
FROM accounts
ORDER BY accounts.amount ASC""")
assert normalize(str(compute(t.amount.distinct().sort(), s))) == normalize("""
SELECT DISTINCT accounts.amount as amount
FROM accounts
ORDER BY amount ASC""")
def test_head():
assert str(compute(t.head(2), s)) == str(select(s).limit(2))
def test_label():
assert (str(compute((t['amount'] * 10).label('foo'),
s, post_compute=False)) ==
str((s.c.amount * 10).label('foo')))
def test_relabel_table():
result = compute(t.relabel(name='NAME', id='ID'), s)
expected = select([
s.c.name.label('NAME'),
s.c.amount,
s.c.id.label('ID'),
])
assert str(result) == str(expected)
def test_relabel_projection():
result = compute(
t[['name', 'id']].relabel(name='new_name', id='new_id'),
s,
)
assert normalize(str(result)) == normalize(
"""SELECT
accounts.name AS new_name,
accounts.id AS new_id
FROM accounts""",
)
def test_merge():
col = (t['amount'] * 2).label('new')
expr = merge(t['name'], col)
result = str(compute(expr, s))
assert 'amount * ' in result
assert 'FROM accounts' in result
assert 'SELECT accounts.name' in result
assert 'new' in result
def test_projection_of_selection():
print(compute(t[t['amount'] < 0][['name', 'amount']], s))
assert len(str(compute(t[t['amount'] < 0], s))) > \
len(str(compute(t[t['amount'] < 0][['name', 'amount']], s)))
def test_outer_join():
L = symbol('L', 'var * {id: int, name: string, amount: real}')
R = symbol('R', 'var * {city: string, id: int}')
with tmpfile('db') as fn:
uri = 'sqlite:///' + fn
engine = resource(uri)
_left = [(1, 'Alice', 100),
(2, 'Bob', 200),
(4, 'Dennis', 400)]
left = resource(uri, 'left', dshape=L.dshape)
into(left, _left)
_right = [('NYC', 1),
('Boston', 1),
('LA', 3),
('Moscow', 4)]
right = resource(uri, 'right', dshape=R.dshape)
into(right, _right)
conn = engine.connect()
query = compute(join(L, R, how='inner'),
{L: left, R: right},
post_compute=False)
result = list(map(tuple, conn.execute(query).fetchall()))
assert set(result) == set(
[(1, 'Alice', 100, 'NYC'),
(1, 'Alice', 100, 'Boston'),
(4, 'Dennis', 400, 'Moscow')])
query = compute(join(L, R, how='left'),
{L: left, R: right},
post_compute=False)
result = list(map(tuple, conn.execute(query).fetchall()))
assert set(result) == set(
[(1, 'Alice', 100, 'NYC'),
(1, 'Alice', 100, 'Boston'),
(2, 'Bob', 200, None),
(4, 'Dennis', 400, 'Moscow')])
query = compute(join(L, R, how='right'),
{L: left, R: right},
post_compute=False)
print(query)
result = list(map(tuple, conn.execute(query).fetchall()))
print(result)
assert set(result) == set(
[(1, 'Alice', 100, 'NYC'),
(1, 'Alice', 100, 'Boston'),
(3, None, None, 'LA'),
(4, 'Dennis', 400, 'Moscow')])
# SQLAlchemy doesn't support full outer join
"""
query = compute(join(L, R, how='outer'),
{L: left, R: right},
post_compute=False)
result = list(map(tuple, conn.execute(query).fetchall()))
assert set(result) == set(
[(1, 'Alice', 100, 'NYC'),
(1, 'Alice', 100, 'Boston'),
(2, 'Bob', 200, None),
(3, None, None, 'LA'),
(4, 'Dennis', 400, 'Moscow')])
"""
conn.close()
def test_summary():
expr = summary(a=t.amount.sum(), b=t.id.count())
result = str(compute(expr, s))
assert 'sum(accounts.amount) as a' in result.lower()
assert 'count(accounts.id) as b' in result.lower()
def test_summary_clean():
t2 = t[t.amount > 0]
expr = summary(a=t2.amount.sum(), b=t2.id.count())
result = str(compute(expr, s))
assert normalize(result) == normalize("""
SELECT sum(accounts.amount) as a, count(accounts.id) as b
FROM accounts
WHERE accounts.amount > :amount_1""")
def test_summary_by():
expr = by(t.name, summary(a=t.amount.sum(), b=t.id.count()))
result = str(compute(expr, s))
assert 'sum(accounts.amount) as a' in result.lower()
assert 'count(accounts.id) as b' in result.lower()
assert 'group by accounts.name' in result.lower()
def test_clean_join():
metadata = sa.MetaData()
name = sa.Table('name', metadata,
sa.Column('id', sa.Integer),
sa.Column('name', sa.String),
)
city = sa.Table('place', metadata,
sa.Column('id', sa.Integer),
sa.Column('city', sa.String),
sa.Column('country', sa.String),
)
friends = sa.Table('friends', metadata,
sa.Column('a', sa.Integer),
sa.Column('b', sa.Integer),
)
tcity = symbol('city', discover(city))
tfriends = symbol('friends', discover(friends))
tname = symbol('name', discover(name))
ns = {tname: name, tfriends: friends, tcity: city}
expr = join(tfriends, tname, 'a', 'id')
assert normalize(str(compute(expr, ns))) == normalize("""
SELECT friends.a, friends.b, name.name
FROM friends JOIN name on friends.a = name.id""")
expr = join(join(tfriends, tname, 'a', 'id'), tcity, 'a', 'id')
result = compute(expr, ns)
expected1 = """
SELECT friends.a, friends.b, name.name, place.city, place.country
FROM friends
JOIN name ON friends.a = name.id
JOIN place ON friends.a = place.id
"""
expected2 = """
SELECT alias.a, alias.b, alias.name, place.city, place.country
FROM (SELECT friends.a AS a, friends.b AS b, name.name AS name
FROM friends JOIN name ON friends.a = name.id) AS alias
JOIN place ON alias.a = place.id
"""
assert (normalize(str(result)) == normalize(expected1) or
normalize(str(result)) == normalize(expected2))
def test_like():
expr = t.like(name='Alice*')
assert normalize(str(compute(expr, s))) == normalize("""
SELECT accounts.name, accounts.amount, accounts.id
FROM accounts
WHERE accounts.name LIKE :name_1""")
def test_strlen():
expr = t.name.strlen()
result = str(compute(expr, s))
expected = "SELECT char_length(accounts.name) as name FROM accounts"
assert normalize(result) == normalize(expected)
def test_columnwise_on_complex_selection():
result = str(select(compute(t[t.amount > 0].amount + 1, s)))
assert normalize(result) == \
normalize("""
SELECT accounts.amount + :amount_1 AS amount
FROM accounts
WHERE accounts.amount > :amount_2
""")
def test_reductions_on_complex_selections():
assert (
normalize(str(select(compute(t[t.amount > 0].id.sum(), s)))) ==
normalize(
"""
select
sum(alias.id) as id_sum
from (select
accounts.id as id
from accounts
where accounts.amount > :amount_1) as alias
"""
)
)
def test_clean_summary_by_where():
t2 = t[t.id == 1]
expr = by(t2.name, sum=t2.amount.sum(), count=t2.amount.count())
result = compute(expr, s)
assert normalize(str(result)) == normalize("""
SELECT accounts.name, count(accounts.amount) AS count, sum(accounts.amount) AS sum
FROM accounts
WHERE accounts.id = :id_1
GROUP BY accounts.name
""")
def test_by_on_count():
expr = by(t.name, count=t.count())
result = compute(expr, s)
assert normalize(str(result)) == normalize("""
SELECT accounts.name, count(accounts.id) AS count
FROM accounts
GROUP BY accounts.name
""")
def test_join_complex_clean():
metadata = sa.MetaData()
name = sa.Table('name', metadata,
sa.Column('id', sa.Integer),
sa.Column('name', sa.String),
)
city = sa.Table('place', metadata,
sa.Column('id', sa.Integer),
sa.Column('city', sa.String),
sa.Column('country', sa.String),
)
tname = symbol('name', discover(name))
tcity = symbol('city', discover(city))
ns = {tname: name, tcity: city}
expr = join(tname[tname.id > 0], tcity, 'id')
result = compute(expr, ns)
expected1 = """
SELECT name.id, name.name, place.city, place.country
FROM name JOIN place ON name.id = place.id
WHERE name.id > :id_1"""
expected2 = """
SELECT alias.id, alias.name, place.city, place.country
FROM (SELECT name.id as id, name.name AS name
FROM name
WHERE name.id > :id_1) AS alias
JOIN place ON alias.id = place.id"""
assert (normalize(str(result)) == normalize(expected1) or
normalize(str(result)) == normalize(expected2))
def test_projection_of_join():
metadata = sa.MetaData()
name = sa.Table('name', metadata,
sa.Column('id', sa.Integer),
sa.Column('name', sa.String),
)
city = sa.Table('place', metadata,
sa.Column('id', sa.Integer),
sa.Column('city', sa.String),
sa.Column('country', sa.String),
)
tname = symbol('name', discover(name))
tcity = symbol('city', discover(city))
expr = join(tname, tcity[tcity.city == 'NYC'], 'id')[['country', 'name']]
ns = {tname: name, tcity: city}
result = compute(expr, ns)
expected1 = """
SELECT place.country, name.name
FROM name JOIN place ON name.id = place.id
WHERE place.city = :city_1"""
expected2 = """
SELECT alias.country, name.name
FROM name
JOIN (SELECT place.id AS id, place.city AS city, place.country AS country
FROM place
WHERE place.city = :city_1) AS alias
ON name.id = alias_6.id"""
assert (normalize(str(result)) == normalize(expected1) or
normalize(str(result)) == normalize(expected2))
def test_lower_column():
metadata = sa.MetaData()
name = sa.Table('name', metadata,
sa.Column('id', sa.Integer),
sa.Column('name', sa.String),
)
city = sa.Table('place', metadata,
sa.Column('id', sa.Integer),
sa.Column('city', sa.String),
sa.Column('country', sa.String),
)
tname = symbol('name', discover(name))
tcity = symbol('city', discover(city))
assert lower_column(name.c.id) is name.c.id
assert lower_column(select(name).c.id) is name.c.id
j = name.join(city, name.c.id == city.c.id)
col = [c for c in j.columns if c.name == 'country'][0]
assert lower_column(col) is city.c.country
def test_selection_of_join():
metadata = sa.MetaData()
name = sa.Table('name', metadata,
sa.Column('id', sa.Integer),
sa.Column('name', sa.String),
)
city = sa.Table('place', metadata,
sa.Column('id', sa.Integer),
sa.Column('city', sa.String),
sa.Column('country', sa.String),
)
tname = symbol('name', discover(name))
tcity = symbol('city', discover(city))
ns = {tname: name, tcity: city}
j = join(tname, tcity, 'id')
expr = j[j.city == 'NYC'].name
result = compute(expr, ns)
assert normalize(str(result)) == normalize("""
SELECT name.name
FROM name JOIN place ON name.id = place.id
WHERE place.city = :city_1""")
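# Self-joins: when a table is joined with itself, the generated SQL aliases
# the two sides as tab_left / tab_right, and non-key columns shared by both
# sides pick up _left / _right suffixes (or custom ones, see
# test_join_suffixes below).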
def test_join_on_same_table():
metadata = sa.MetaData()
T = sa.Table('tab', metadata,
sa.Column('a', sa.Integer),
sa.Column('b', sa.Integer),
)
t = symbol('tab', discover(T))
expr = join(t, t, 'a')
result = compute(expr, {t: T})
assert normalize(str(result)) == normalize("""
SELECT
tab_left.a,
tab_left.b as b_left,
tab_right.b as b_right
FROM
tab AS tab_left
JOIN
tab AS tab_right
ON
tab_left.a = tab_right.a
""")
expr = join(t, t, 'a').b_left.sum()
result = compute(expr, {t: T})
assert normalize(str(result)) == normalize("""
select sum(alias.b_left) as b_left_sum from
(select
tab_left.b as b_left
from
tab as tab_left
join
tab as tab_right
on
tab_left.a = tab_right.a) as
alias""")
expr = join(t, t, 'a')
expr = summary(total=expr.a.sum(), smallest=expr.b_right.min())
result = compute(expr, {t: T})
assert normalize(str(result)) == normalize("""
SELECT
min(tab_right.b) as smallest,
sum(tab_left.a) as total
FROM
tab AS tab_left
JOIN
tab AS tab_right
ON
tab_left.a = tab_right.a
""")
def test_join_suffixes():
metadata = sa.MetaData()
T = sa.Table('tab', metadata,
sa.Column('a', sa.Integer),
sa.Column('b', sa.Integer),
)
t = symbol('tab', discover(T))
suffixes = '_l', '_r'
expr = join(t, t, 'a', suffixes=suffixes)
result = compute(expr, {t: T})
assert normalize(str(result)) == normalize("""
SELECT
tab{l}.a,
tab{l}.b as b{l},
tab{r}.b as b{r}
FROM
tab AS tab{l}
JOIN
tab AS tab{r}
ON
tab{l}.a = tab{r}.a
""".format(l=suffixes[0], r=suffixes[1]))
def test_field_access_on_engines(data):
s, engine = data['s'], data['engine']
result = compute_up(s.city, engine)
assert isinstance(result, sa.Table)
assert result.name == 'city'
def test_computation_directly_on_sqlalchemy_Tables(data):
name = data['name']
s = symbol('s', discover(name))
result = into(list, compute(s.id + 1, name))
assert not isinstance(result, sa.sql.Selectable)
assert list(result) == []
def test_computation_directly_on_metadata(data):
metadata = data['metadata']
name = data['name']
s = symbol('s', discover(metadata))
result = compute(s.name, {s: metadata}, post_compute=False)
assert result == name
sql_bank = sa.Table('bank', sa.MetaData(),
sa.Column('id', sa.Integer),
sa.Column('name', sa.String),
sa.Column('amount', sa.Integer))
sql_cities = sa.Table('cities', sa.MetaData(),
sa.Column('name', sa.String),
sa.Column('city', sa.String))
bank = symbol('bank', discover(sql_bank))
cities = symbol('cities', discover(sql_cities))
def test_aliased_views_with_two_group_bys():
expr = by(bank.name, total=bank.amount.sum())
expr2 = by(expr.total, count=expr.name.count())
result = compute(expr2, {bank: sql_bank, cities: sql_cities})
assert normalize(str(result)) == normalize("""
SELECT alias.total, count(alias.name) as count
FROM (SELECT bank.name AS name, sum(bank.amount) AS total
FROM bank
GROUP BY bank.name) as alias
GROUP BY alias.total
""")
def test_aliased_views_with_join():
joined = join(bank, cities)
expr = by(joined.city, total=joined.amount.sum())
expr2 = by(expr.total, count=expr.city.nunique())
result = compute(expr2, {bank: sql_bank, cities: sql_cities})
assert normalize(str(result)) == normalize("""
SELECT alias.total, count(DISTINCT alias.city) AS count
FROM (SELECT cities.city AS city, sum(bank.amount) AS total
FROM bank
JOIN cities ON bank.name = cities.name
GROUP BY cities.city) as alias
GROUP BY alias.total
""")
def test_select_field_on_alias():
result = compute_up(t.amount, select(s).limit(10).alias('foo'))
assert normalize(str(select(result))) == normalize("""
SELECT foo.amount
FROM (SELECT accounts.name AS name, accounts.amount AS amount, accounts.id AS id
FROM accounts
LIMIT :param_1) as foo""")
@pytest.mark.xfail(raises=Exception,
reason="sqlalchemy.join seems to drop unnecessary tables")
def test_join_on_single_column():
expr = join(cities[['name']], bank)
result = compute(expr, {bank: sql_bank, cities: sql_cities})
assert normalize(str(result)) == normalize("""
SELECT bank.id, bank.name, bank.amount
FROM bank join cities ON bank.name = cities.name""")
expr = join(bank, cities.name)
result = compute(expr, {bank: sql_bank, cities: sql_cities})
assert normalize(str(result)) == normalize("""
SELECT bank.id, bank.name, bank.amount
FROM bank join cities ON bank.name = cities.name""")
def test_aliased_views_more():
metadata = sa.MetaData()
lhs = sa.Table('aaa', metadata,
sa.Column('x', sa.Integer),
sa.Column('y', sa.Integer),
sa.Column('z', sa.Integer))
rhs = sa.Table('bbb', metadata,
sa.Column('w', sa.Integer),
sa.Column('x', sa.Integer),
sa.Column('y', sa.Integer))
L = symbol('L', 'var * {x: int, y: int, z: int}')
R = symbol('R', 'var * {w: int, x: int, y: int}')
expr = join(by(L.x, y_total=L.y.sum()),
R)
result = compute(expr, {L: lhs, R: rhs})
assert normalize(str(result)) == normalize("""
SELECT alias.x, alias.y_total, bbb.w, bbb.y
FROM (SELECT aaa.x as x, sum(aaa.y) as y_total
FROM aaa
GROUP BY aaa.x) AS alias
JOIN bbb ON alias.x = bbb.x """)
expr2 = by(expr.w, count=expr.x.count(), total2=expr.y_total.sum())
result2 = compute(expr2, {L: lhs, R: rhs})
assert (
normalize(str(result2)) == normalize("""
SELECT alias_2.w, count(alias_2.x) as count, sum(alias_2.y_total) as total2
FROM (SELECT alias.x, alias.y_total, bbb.w, bbb.y
FROM (SELECT aaa.x as x, sum(aaa.y) as y_total
FROM aaa
GROUP BY aaa.x) AS alias
JOIN bbb ON alias.x = bbb.x) AS alias_2
GROUP BY alias_2.w""")
or
normalize(str(result2)) == normalize("""
SELECT bbb.w, count(alias.x) as count, sum(alias.y_total) as total2
FROM (SELECT aaa.x as x, sum(aaa.y) as y_total
FROM aaa
GROUP BY aaa.x) as alias
JOIN bbb ON alias.x = bbb.x
GROUP BY bbb.w"""))
def test_aliased_views_with_computation():
engine = sa.create_engine('sqlite:///:memory:')
df_aaa = DataFrame({'x': [1, 2, 3, 2, 3],
'y': [2, 1, 2, 3, 1],
'z': [3, 3, 3, 1, 2]})
df_bbb = DataFrame({'w': [1, 2, 3, 2, 3],
'x': [2, 1, 2, 3, 1],
'y': [3, 3, 3, 1, 2]})
df_aaa.to_sql('aaa', engine)
df_bbb.to_sql('bbb', engine)
metadata = sa.MetaData(engine)
metadata.reflect()
sql_aaa = metadata.tables['aaa']
sql_bbb = metadata.tables['bbb']
L = symbol('aaa', discover(df_aaa))
R = symbol('bbb', discover(df_bbb))
expr = join(by(L.x, y_total=L.y.sum()),
R)
a = compute(expr, {L: df_aaa, R: df_bbb})
b = compute(expr, {L: sql_aaa, R: sql_bbb})
assert into(set, a) == into(set, b)
expr2 = by(expr.w, count=expr.x.count(), total2=expr.y_total.sum())
a = compute(expr2, {L: df_aaa, R: df_bbb})
b = compute(expr2, {L: sql_aaa, R: sql_bbb})
assert into(set, a) == into(set, b)
expr3 = by(expr.x, count=expr.y_total.count())
a = compute(expr3, {L: df_aaa, R: df_bbb})
b = compute(expr3, {L: sql_aaa, R: sql_bbb})
assert into(set, a) == into(set, b)
expr4 = join(expr2, R)
a = compute(expr4, {L: df_aaa, R: df_bbb})
b = compute(expr4, {L: sql_aaa, R: sql_bbb})
assert into(set, a) == into(set, b)
""" # Takes a while
expr5 = by(expr4.count, total=(expr4.x + expr4.y).sum())
a = compute(expr5, {L: df_aaa, R: df_bbb})
b = compute(expr5, {L: sql_aaa, R: sql_bbb})
assert into(set, a) == into(set, b)
"""
def test_distinct_count_on_projection():
expr = t[['amount']].distinct().count()
result = compute(expr, {t: s})
assert (
normalize(str(result)) == normalize("""
SELECT count(DISTINCT accounts.amount)
FROM accounts""")
or
normalize(str(result)) == normalize("""
SELECT count(alias.amount) as count
FROM (SELECT DISTINCT accounts.amount AS amount
FROM accounts) as alias"""))
# note that id is the primary key
expr = t[['amount', 'id']].distinct().count()
result = compute(expr, {t: s})
assert normalize(str(result)) == normalize("""
SELECT count(alias.id) as count
FROM (SELECT DISTINCT accounts.amount AS amount, accounts.id AS id
FROM accounts) as alias""")
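# Counting the rows of a filtered join: the filter may be folded into the
# joined subquery or kept as a separate nested alias, so two translations are
# accepted.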
def test_join_count():
ds = datashape.dshape(
'{t1: var * {x: int, y: int}, t2: var * {a: int, b: int}}')
engine = resource('sqlite:///:memory:', dshape=ds)
db = symbol('db', ds)
expr = join(db.t1[db.t1.x > -1], db.t2, 'x', 'a').count()
result = compute(expr, {db: engine}, post_compute=False)
expected1 = """
SELECT count(alias.x) as count
FROM (SELECT t1.x AS x, t1.y AS y, t2.b AS b
FROM t1 JOIN t2 ON t1.x = t2.a
WHERE t1.x > ?) as alias
"""
expected2 = """
SELECT count(alias2.x) AS count
FROM (SELECT alias1.x AS x, alias1.y AS y, t2.b AS b
FROM (SELECT t1.x AS x, t1.y AS y
FROM t1
WHERE t1.x > ?) AS alias1
JOIN t2 ON alias1.x = t2.a) AS alias2"""
assert (normalize(str(result)) == normalize(expected1) or
normalize(str(result)) == normalize(expected2))
def test_transform_where():
t2 = t[t.id == 1]
expr = transform(t2, abs_amt=abs(t2.amount), sine=sin(t2.id))
result = compute(expr, s)
expected = """SELECT
accounts.name,
accounts.amount,
accounts.id,
abs(accounts.amount) as abs_amt,
sin(accounts.id) as sine
FROM accounts
WHERE accounts.id = :id_1
"""
assert normalize(str(result)) == normalize(expected)
def test_merge():
col = (t['amount'] * 2).label('new')
expr = merge(t['name'], col)
result = str(compute(expr, s))
assert 'amount * ' in result
assert 'FROM accounts' in result
assert 'SELECT accounts.name' in result
assert 'new' in result
def test_merge_where():
t2 = t[t.id == 1]
expr = merge(t2[['amount', 'name']], t2.id)
result = compute(expr, s)
expected = normalize("""SELECT
accounts.amount,
accounts.name,
accounts.id
FROM accounts
WHERE accounts.id = :id_1
""")
assert normalize(str(result)) == expected
def test_transform_filter_by_single_column():
t2 = t[t.amount < 0]
tr = transform(t2, abs_amt=abs(t2.amount), sine=sin(t2.id))
expr = by(tr.name, avg_amt=tr.abs_amt.mean())
result = compute(expr, s)
expected = normalize("""SELECT
accounts.name,
avg(abs(accounts.amount)) AS avg_amt
FROM accounts
WHERE accounts.amount < :amount_1
GROUP BY accounts.name
""")
assert normalize(str(result)) == expected
def test_transform_filter_by_multiple_columns():
t2 = t[t.amount < 0]
tr = transform(t2, abs_amt=abs(t2.amount), sine=sin(t2.id))
expr = by(tr.name, avg_amt=tr.abs_amt.mean(), sum_sine=tr.sine.sum())
result = compute(expr, s)
expected = normalize("""SELECT
accounts.name,
avg(abs(accounts.amount)) AS avg_amt,
sum(sin(accounts.id)) AS sum_sine
FROM accounts
WHERE accounts.amount < :amount_1
GROUP BY accounts.name
""")
assert normalize(str(result)) == expected
def test_transform_filter_by_different_order():
t2 = transform(t, abs_amt=abs(t.amount), sine=sin(t.id))
tr = t2[t2.amount < 0]
expr = by(tr.name,
avg_amt=tr.abs_amt.mean(),
avg_sine=tr.sine.sum() / tr.sine.count())
result = compute(expr, s)
expected = normalize("""SELECT
accounts.name,
avg(abs(accounts.amount)) AS avg_amt,
sum(sin(accounts.id)) / count(sin(accounts.id)) AS avg_sine
FROM accounts
WHERE accounts.amount < :amount_1
GROUP BY accounts.name
""")
assert normalize(str(result)) == expected
def test_transform_filter_by_projection():
t2 = transform(t, abs_amt=abs(t.amount), sine=sin(t.id))
tr = t2[t2.amount < 0]
expr = by(tr[['name', 'id']],
avg_amt=tr.abs_amt.mean(),
avg_sine=tr.sine.sum() / tr.sine.count())
result = compute(expr, s)
expected = normalize("""SELECT
accounts.name,
accounts.id,
avg(abs(accounts.amount)) AS avg_amt,
sum(sin(accounts.id)) / count(sin(accounts.id)) AS avg_sine
FROM accounts
WHERE accounts.amount < :amount_1
GROUP BY accounts.name, accounts.id
""")
assert normalize(str(result)) == expected
def test_merge_compute():
data = [(1, 'Alice', 100),
(2, 'Bob', 200),
(4, 'Dennis', 400)]
ds = datashape.dshape('var * {id: int, name: string, amount: real}')
s = symbol('s', ds)
with tmpfile('db') as fn:
uri = 'sqlite:///' + fn
into(uri + '::table', data, dshape=ds)
expr = transform(s, amount10=s.amount * 10)
result = into(list, compute(expr, {s: data}))
assert result == [(1, 'Alice', 100, 1000),
(2, 'Bob', 200, 2000),
(4, 'Dennis', 400, 4000)]
def test_notnull():
result = compute(nt[nt.name.notnull()], ns)
expected = """SELECT
nullaccounts.name,
nullaccounts.amount,
nullaccounts.id
FROM nullaccounts
WHERE nullaccounts.name is not null
"""
assert normalize(str(result)) == normalize(expected)
def test_head_limit():
assert compute(t.head(5).head(10), s)._limit == 5
assert compute(t.head(10).head(5), s)._limit == 5
assert compute(t.head(10).head(10), s)._limit == 10
def test_no_extraneous_join():
ds = """ {event: var * {name: ?string,
operation: ?string,
datetime_nearest_receiver: ?datetime,
aircraft: ?string,
temperature_2m: ?float64,
temperature_5cm: ?float64,
humidity: ?float64,
windspeed: ?float64,
pressure: ?float64,
include: int64},
operation: var * {name: ?string,
runway: int64,
takeoff: bool,
datetime_nearest_close: ?string}}
"""
db = resource('sqlite:///:memory:', dshape=ds)
d = symbol('db', dshape=ds)
expr = join(d.event[d.event.include == True],
d.operation[['name', 'datetime_nearest_close']],
'operation', 'name')
result = compute(expr, db)
assert normalize(str(result)) == normalize("""
SELECT
alias.operation,
alias.name as name_left,
alias.datetime_nearest_receiver,
alias.aircraft,
alias.temperature_2m,
alias.temperature_5cm,
alias.humidity,
alias.windspeed,
alias.pressure,
alias.include,
alias.datetime_nearest_close
FROM
(SELECT
event.name AS name,
event.operation AS operation,
event.datetime_nearest_receiver AS datetime_nearest_receiver,
event.aircraft AS aircraft,
event.temperature_2m AS temperature_2m,
event.temperature_5cm AS temperature_5cm,
event.humidity AS humidity,
event.windspeed AS windspeed,
event.pressure AS pressure,
event.include AS include
FROM
event WHERE event.include = 1) AS alias1
JOIN
(SELECT
operation.name AS name,
operation.datetime_nearest_close as datetime_nearest_close
FROM operation) AS alias2
ON
alias1.operation = alias2.name
""")
def test_math():
result = compute(sin(t.amount), s)
assert normalize(str(result)) == normalize("""
SELECT sin(accounts.amount) as amount
FROM accounts""")
result = compute(floor(t.amount), s)
assert normalize(str(result)) == normalize("""
SELECT floor(accounts.amount) as amount
FROM accounts""")
result = compute(t.amount // 2, s)
assert normalize(str(result)) == normalize("""
SELECT floor(accounts.amount / :amount_1) AS amount
FROM accounts""")
def test_transform_order():
r = transform(t, sin_amount=sin(t.amount), cos_id=cos(t.id))
result = compute(r, s)
expected = """SELECT
accounts.name,
accounts.amount,
accounts.id,
cos(accounts.id) as cos_id,
sin(accounts.amount) as sin_amount
FROM accounts
"""
assert normalize(str(result)) == normalize(expected)
def test_isin():
result = t[t.name.isin(['foo', 'bar'])]
result_sql_expr = str(compute(result, s))
expected = """
SELECT
accounts.name,
accounts.amount,
accounts.id
FROM
accounts
WHERE
accounts.name
IN
(:name_1,
:name_2)
"""
assert normalize(result_sql_expr) == normalize(expected)
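# SQLAlchemy 1.0.0/1.0.1 group by the label of a datetime grouper (ds_year),
# while other versions repeat the EXTRACT(...) expression in the GROUP BY
# clause, hence the version-specific pair of tests below.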
@pytest.mark.skipif('1.0.0' <= LooseVersion(sa.__version__) <= '1.0.1',
reason=("SQLAlchemy generates different code in 1.0.0"
" and 1.0.1"))
def test_date_grouper_repeats_not_one_point_oh():
columns = [sa.Column('amount', sa.REAL),
sa.Column('ds', sa.TIMESTAMP)]
data = sa.Table('t', sa.MetaData(), *columns)
t = symbol('t', discover(data))
expr = by(t.ds.year, avg_amt=t.amount.mean())
result = str(compute(expr, data))
# FYI spark sql isn't able to parse this correctly
expected = """SELECT
EXTRACT(year FROM t.ds) as ds_year,
AVG(t.amount) as avg_amt
FROM t
GROUP BY EXTRACT(year FROM t.ds)
"""
assert normalize(result) == normalize(expected)
@pytest.mark.skipif(LooseVersion(sa.__version__) < '1.0.0' or
LooseVersion(sa.__version__) >= '1.0.2',
reason=("SQLAlchemy generates different code in < 1.0.0 "
"and >= 1.0.2"))
def test_date_grouper_repeats():
columns = [sa.Column('amount', sa.REAL),
sa.Column('ds', sa.TIMESTAMP)]
data = sa.Table('t', sa.MetaData(), *columns)
t = symbol('t', discover(data))
expr = by(t.ds.year, avg_amt=t.amount.mean())
result = str(compute(expr, data))
# FYI spark sql isn't able to parse this correctly
expected = """SELECT
EXTRACT(year FROM t.ds) as ds_year,
AVG(t.amount) as avg_amt
FROM t
GROUP BY ds_year
"""
assert normalize(result) == normalize(expected)
def test_transform_then_project_single_column():
expr = transform(t, foo=t.id + 1)[['foo', 'id']]
result = normalize(str(compute(expr, s)))
expected = normalize("""SELECT
accounts.id + :id_1 as foo,
accounts.id
FROM accounts""")
assert result == expected
def test_transform_then_project():
proj = ['foo', 'id']
expr = transform(t, foo=t.id + 1)[proj]
result = normalize(str(compute(expr, s)))
expected = normalize("""SELECT
accounts.id + :id_1 as foo,
accounts.id
FROM accounts""")
assert result == expected
def test_reduce_does_not_compose():
expr = by(t.name, counts=t.count()).counts.max()
result = str(compute(expr, s))
expected = """
SELECT max(alias.counts) AS counts_max
FROM
(SELECT count(accounts.id) AS counts
FROM accounts GROUP BY accounts.name) as alias"""
assert normalize(result) == normalize(expected)
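# Normalizing a grouped reduction by another reduction over the same grouping
# would require a common table expression (WITH ...), which is not generated
# yet, hence the NotImplementedError xfail below.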
@pytest.mark.xfail(raises=NotImplementedError)
def test_normalize_reduction():
expr = by(t.name, counts=t.count())
expr = transform(expr, normed_counts=expr.counts / expr.counts.max())
result = str(compute(expr, s))
expected = """WITH alias AS
(SELECT count(accounts.id) AS counts
FROM accounts GROUP BY accounts.name)
SELECT alias.counts / max(alias.counts) AS normed_counts
FROM alias"""
assert normalize(result) == normalize(expected)
def test_do_not_erase_group_by_functions_with_datetime():
t, s = tdate, sdate
expr = by(t[t.amount < 0].occurred_on.date,
avg_amount=t[t.amount < 0].amount.mean())
result = str(compute(expr, s))
expected = """SELECT
date(accdate.occurred_on) as occurred_on_date,
avg(accdate.amount) as avg_amount
FROM
accdate
WHERE
accdate.amount < :amount_1
GROUP BY
date(accdate.occurred_on)
"""
assert normalize(result) == normalize(expected)
def test_not():
expr = t.amount[~t.name.isin(('Billy', 'Bob'))]
result = str(compute(expr, s))
expected = """SELECT
accounts.amount
FROM
accounts
WHERE
accounts.name not in (:name_1, :name_2)
"""
assert normalize(result) == normalize(expected)
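# Row slices translate to OFFSET/LIMIT.  Only a step of 1 can be expressed in
# SQL; any other step raises ValueError (see test_slice_step).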
def test_slice():
start, stop, step = 50, 100, 1
result = str(compute(t[start:stop], s))
# Verifies that compute is translating the query correctly
assert result == str(select(s).offset(start).limit(stop))
# Verifies the query against expected SQL query
expected = """
SELECT accounts.name, accounts.amount, accounts.id FROM accounts
LIMIT :param_1 OFFSET :param_2
"""
assert normalize(str(result)) == normalize(str(expected))
# Step size of 1 should be alright
compute(t[start:stop:step], s)
@pytest.mark.xfail(raises=ValueError)
def test_slice_step():
start, stop, step = 50, 100, 2
compute(t[start:stop:step], s)
def test_datetime_to_date():
expr = tdate.occurred_on.date
result = str(compute(expr, sdate))
expected = """SELECT
DATE(accdate.occurred_on) as occurred_on_date
FROM
accdate
"""
assert normalize(result) == normalize(expected)
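# Order of composition matters: taking the first five names and then sorting
# wraps the LIMIT in a subquery before applying ORDER BY, which is not the
# same query as sorting first and then slicing.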
def test_sort_compose():
expr = t.name[:5].sort()
result = compute(expr, s)
expected = """select
anon_1.name
from (select
accounts.name as name
from
accounts
limit :param_1
offset :param_2) as anon_1
order by
anon_1.name asc"""
assert normalize(str(result)) == normalize(expected)
assert (normalize(str(compute(t.sort('name').name[:5], s))) !=
normalize(expected))
def test_coerce():
expr = t.amount.coerce(to='int64')
expected = """SELECT
cast(accounts.amount AS BIGINT) AS amount
FROM accounts"""
result = compute(expr, s)
assert normalize(str(result)) == normalize(expected)
def test_multi_column_by_after_transform():
tbl = transform(t, new_amount=t.amount + 1, one_two=t.amount * 2)
expr = by(tbl[['name', 'one_two']], avg_amt=tbl.new_amount.mean())
result = compute(expr, s)
expected = """SELECT
accounts.name,
accounts.amount * :amount_1 as one_two,
avg(accounts.amount + :amount_2) as avg_amt
FROM
accounts
GROUP BY
accounts.name, accounts.amount * :amount_1
"""
assert normalize(str(result)) == normalize(expected)
def test_multi_column_by_after_transform_and_filter():
tbl = t[t.name == 'Alice']
tbl = transform(tbl, new_amount=tbl.amount + 1, one_two=tbl.amount * 2)
expr = by(tbl[['name', 'one_two']], avg_amt=tbl.new_amount.mean())
result = compute(expr, s)
expected = """SELECT
accounts.name,
accounts.amount * :amount_1 as one_two,
avg(accounts.amount + :amount_2) as avg_amt
FROM
accounts
WHERE
accounts.name = :name_1
GROUP BY
accounts.name, accounts.amount * :amount_1
"""
assert normalize(str(result)) == normalize(expected)
def test_attribute_access_on_transform_filter():
tbl = transform(t, new_amount=t.amount + 1)
expr = tbl[tbl.name == 'Alice'].new_amount
result = compute(expr, s)
expected = """SELECT
accounts.amount + :amount_1 as new_amount
FROM
accounts
WHERE
accounts.name = :name_1
"""
assert normalize(str(result)) == normalize(expected)
def test_attribute_on_filter_transform_groupby():
tbl = t[t.name == 'Alice']
tbl = transform(tbl, new_amount=tbl.amount + 1, one_two=tbl.amount * 2)
gb = by(tbl[['name', 'one_two']], avg_amt=tbl.new_amount.mean())
expr = gb.avg_amt
result = compute(expr, s)
expected = """SELECT
avg(accounts.amount + :amount_1) as avg_amt
FROM
accounts
WHERE
accounts.name = :name_1
GROUP BY
accounts.name, accounts.amount * :amount_2
"""
assert normalize(str(result)) == normalize(expected)
def test_label_projection():
tbl = t[(t.name == 'Alice')]
tbl = transform(tbl, new_amount=tbl.amount + 1, one_two=tbl.amount * 2)
expr = tbl[['new_amount', 'one_two']]
# column selection shouldn't affect the resulting SQL
result = compute(expr[expr.new_amount > 1].one_two, s)
result2 = compute(expr.one_two[expr.new_amount > 1], s)
expected = """SELECT
accounts.amount * :amount_1 as one_two
FROM accounts
WHERE accounts.name = :name_1 and accounts.amount + :amount_2 > :param_1
"""
assert normalize(str(result)) == normalize(expected)
assert normalize(str(result2)) == normalize(expected)
def test_baseball_nested_by():
data = resource('sqlite:///%s' % example('teams.db'))
dshape = discover(data)
d = symbol('d', dshape)
expr = by(d.teams.name,
start_year=d.teams.yearID.min()).start_year.count_values()
result = compute(expr, data, post_compute=False)
expected = """SELECT
anon_1.start_year,
anon_1.count
FROM
(SELECT
alias.start_year as start_year,
count(alias.start_year) as count
FROM
(SELECT
min(teams.yearid) as start_year
FROM teams
GROUP BY teams.name) as alias
GROUP BY alias.start_year) as anon_1 ORDER BY anon_1.count DESC
"""
assert normalize(str(result).replace('"', '')) == normalize(expected)
def test_label_on_filter():
expr = t[t.name == 'Alice'].amount.label('foo').head(2)
result = compute(expr, s)
expected = """SELECT
accounts.amount AS foo
FROM
accounts
WHERE
accounts.name = :name_1
LIMIT :param_1
"""
assert normalize(str(result)) == normalize(expected)
def test_single_field_filter():
expr = t.amount[t.amount > 0]
result = compute(expr, s)
expected = """SELECT
accounts.amount
FROM accounts
WHERE accounts.amount > :amount_1
"""
assert normalize(str(result)) == normalize(expected)
def test_multiple_field_filter():
expr = t.name[t.amount > 0]
result = compute(expr, s)
expected = """SELECT
accounts.name
FROM accounts
WHERE accounts.amount > :amount_1
"""
assert normalize(str(result)) == normalize(expected)
def test_distinct_on_label():
expr = t.name.label('foo').distinct()
result = compute(expr, s)
expected = """SELECT
DISTINCT accounts.name AS foo
FROM accounts
"""
assert normalize(str(result)) == normalize(expected)
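# shift() on a column is expressed with the LAG window function over an empty
# OVER () clause.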
@pytest.mark.parametrize('n', [-1, 0, 1])
def test_shift_on_column(n):
expr = t.name.shift(n)
result = compute(expr, s)
expected = """SELECT
lag(accounts.name, :lag_1) over () as name
FROM accounts
"""
assert normalize(str(result)) == normalize(expected)
def test_empty_string_comparison_with_option_type():
expr = nt.amount[nt.name == '']
result = compute(expr, s)
expected = """
SELECT accounts.amount
FROM accounts
WHERE accounts.name = :name_1
"""
assert normalize(str(result)) == normalize(expected)
def test_tail_no_sort():
assert (
normalize(str(compute(t.head(), {t: s}))) ==
normalize(str(compute(t.tail(), {t: s})))
)
def test_tail_of_sort():
expected = normalize(str(compute(
t.sort('id', ascending=False).head(5).sort('id'),
{t: s},
)))
result = normalize(str(compute(t.sort('id').tail(5), {t: s})))
assert expected == result
def test_tail_sort_in_children():
expected = normalize(str(compute(
t.name.sort('id', ascending=False).head(5).sort('id'),
{t: s},
)))
result = normalize(str(compute(t.name.sort('id').tail(5), {t: s})))
assert expected == result
def test_selection_inner_inputs():
result = normalize(str(compute(t[t.id == tdate.id], {t: s, tdate: sdate})))
expected = normalize("""
select {a}.name, {a}.amount, {a}.id from {a}, {b} where {a}.id = {b}.id
""").format(a=s.name, b=sdate.name)
assert result == expected
| {
"repo_name": "ChinaQuants/blaze",
"path": "blaze/compute/tests/test_sql_compute.py",
"copies": "1",
"size": "57932",
"license": "bsd-3-clause",
"hash": 9071161787863940000,
"line_mean": 29.3626834382,
"line_max": 89,
"alpha_frac": 0.5619174204,
"autogenerated": false,
"ratio": 3.520204168438962,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9580626685118601,
"avg_score": 0.00029898074407222263,
"num_lines": 1908
} |
from __future__ import absolute_import, division, print_function
import pytest
sa = pytest.importorskip('sqlalchemy')
import itertools
import re
from distutils.version import LooseVersion
import datashape
from odo import into, resource, discover
from pandas import DataFrame
from toolz import unique
from blaze.compute.sql import (compute, computefull, select, lower_column,
compute_up)
from blaze.expr import (
symbol, discover, transform, summary, by, sin, join,
floor, cos, merge, nunique, mean, sum, count, exp, concat,
)
from blaze.compatibility import xfail
from blaze.utils import tmpfile, example
names = ('tbl%d' % i for i in itertools.count())
@pytest.fixture(scope='module')
def data():
# make the engine
engine = sa.create_engine('sqlite:///:memory:')
metadata = sa.MetaData(engine)
# name table
name = sa.Table('name', metadata,
sa.Column('id', sa.Integer),
sa.Column('name', sa.String),
)
name.create()
# city table
city = sa.Table('city', metadata,
sa.Column('id', sa.Integer),
sa.Column('city', sa.String),
sa.Column('country', sa.String),
)
city.create()
s = symbol('s', discover(engine))
return {'engine': engine, 'metadata': metadata, 'name': name, 'city': city,
's': s}
t = symbol('t', 'var * {name: string, amount: int, id: int}')
metadata = sa.MetaData()
s = sa.Table('accounts', metadata,
sa.Column('name', sa.String),
sa.Column('amount', sa.Integer),
sa.Column('id', sa.Integer, primary_key=True))
tdate = symbol('t',
"""var * {
name: string,
amount: int,
id: int,
occurred_on: datetime
}""")
sdate = sa.Table('accdate', metadata,
sa.Column('name', sa.String),
sa.Column('amount', sa.Integer),
sa.Column('id', sa.Integer, primary_key=True),
sa.Column('occurred_on', sa.DateTime))
tbig = symbol('tbig',
'var * {name: string, sex: string[1], amount: int, id: int}')
sbig = sa.Table('accountsbig', metadata,
sa.Column('name', sa.String),
sa.Column('sex', sa.String),
sa.Column('amount', sa.Integer),
sa.Column('id', sa.Integer, primary_key=True))
def normalize(s):
s = ' '.join(s.strip().split()).lower()
s = re.sub(r'(alias)_?\d*', r'\1', s)
return re.sub(r'__([A-Za-z_][A-Za-z_0-9]*)', r'\1', s)
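# normalize() collapses whitespace, lowercases, and strips the numeric
# suffixes SQLAlchemy appends to generated alias names, so queries can be
# compared structurally rather than byte-for-byte.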
def test_table():
result = str(computefull(t, s))
expected = """
SELECT accounts.name, accounts.amount, accounts.id
FROM accounts
""".strip()
assert normalize(result) == normalize(expected)
def test_projection():
print(compute(t[['name', 'amount']], s))
assert str(compute(t[['name', 'amount']], s)) == \
str(sa.select([s.c.name, s.c.amount]))
def test_eq():
assert str(compute(t['amount'] == 100, s, post_compute=False)) == \
str(s.c.amount == 100)
def test_eq_unicode():
assert str(compute(t['name'] == u'Alice', s, post_compute=False)) == \
str(s.c.name == u'Alice')
def test_selection():
assert str(compute(t[t['amount'] == 0], s)) == \
str(sa.select([s]).where(s.c.amount == 0))
assert str(compute(t[t['amount'] > 150], s)) == \
str(sa.select([s]).where(s.c.amount > 150))
def test_arithmetic():
assert str(compute(t['amount'] + t['id'], s)) == \
str(sa.select([s.c.amount + s.c.id]))
assert str(compute(t['amount'] + t['id'], s, post_compute=False)) == \
str(s.c.amount + s.c.id)
assert str(compute(t['amount'] * t['id'], s, post_compute=False)) == \
str(s.c.amount * s.c.id)
assert str(compute(t['amount'] * 2, s, post_compute=False)) == \
str(s.c.amount * 2)
assert str(compute(2 * t['amount'], s, post_compute=False)) == \
str(2 * s.c.amount)
assert (str(compute(~(t['amount'] > 10), s, post_compute=False)) ==
"accounts.amount <= :amount_1")
assert str(compute(t['amount'] + t['id'] * 2, s)) == \
str(sa.select([s.c.amount + s.c.id * 2]))
def test_join():
metadata = sa.MetaData()
lhs = sa.Table('amounts', metadata,
sa.Column('name', sa.String),
sa.Column('amount', sa.Integer))
rhs = sa.Table('ids', metadata,
sa.Column('name', sa.String),
sa.Column('id', sa.Integer))
expected = lhs.join(rhs, lhs.c.name == rhs.c.name)
expected = select(list(unique(expected.columns, key=lambda c:
c.name))).select_from(expected)
L = symbol('L', 'var * {name: string, amount: int}')
R = symbol('R', 'var * {name: string, id: int}')
joined = join(L, R, 'name')
result = compute(joined, {L: lhs, R: rhs})
assert normalize(str(result)) == normalize("""
SELECT amounts.name, amounts.amount, ids.id
FROM amounts JOIN ids ON amounts.name = ids.name""")
assert str(select(result)) == str(select(expected))
# Schemas match
assert list(result.c.keys()) == list(joined.fields)
# test sort on join
result = compute(joined.sort('amount'), {L: lhs, R: rhs})
assert normalize(str(result)) == normalize("""
select
anon_1.name,
anon_1.amount,
anon_1.id
from (select
amounts.name as name,
amounts.amount as amount,
ids.id as id
from
amounts
join
ids
on
amounts.name = ids.name) as anon_1
order by
anon_1.amount asc""")
def test_clean_complex_join():
metadata = sa.MetaData()
lhs = sa.Table('amounts', metadata,
sa.Column('name', sa.String),
sa.Column('amount', sa.Integer))
rhs = sa.Table('ids', metadata,
sa.Column('name', sa.String),
sa.Column('id', sa.Integer))
L = symbol('L', 'var * {name: string, amount: int}')
R = symbol('R', 'var * {name: string, id: int}')
joined = join(L[L.amount > 0], R, 'name')
result = compute(joined, {L: lhs, R: rhs})
expected1 = """
SELECT amounts.name, amounts.amount, ids.id
FROM amounts JOIN ids ON amounts.name = ids.name
WHERE amounts.amount > :amount_1"""
expected2 = """
SELECT alias.name, alias.amount, ids.id
FROM (SELECT amounts.name AS name, amounts.amount AS amount
FROM amounts
WHERE amounts.amount > :amount_1) AS alias
JOIN ids ON alias.name = ids.name"""
assert (normalize(str(result)) == normalize(expected1) or
normalize(str(result)) == normalize(expected2))
def test_multi_column_join():
metadata = sa.MetaData()
lhs = sa.Table('aaa', metadata,
sa.Column('x', sa.Integer),
sa.Column('y', sa.Integer),
sa.Column('z', sa.Integer))
rhs = sa.Table('bbb', metadata,
sa.Column('w', sa.Integer),
sa.Column('x', sa.Integer),
sa.Column('y', sa.Integer))
L = symbol('L', 'var * {x: int, y: int, z: int}')
R = symbol('R', 'var * {w: int, x: int, y: int}')
joined = join(L, R, ['x', 'y'])
expected = lhs.join(rhs, (lhs.c.x == rhs.c.x)
& (lhs.c.y == rhs.c.y))
expected = select(list(unique(expected.columns, key=lambda c:
c.name))).select_from(expected)
result = compute(joined, {L: lhs, R: rhs})
assert str(result) == str(expected)
assert str(select(result)) == str(select(expected))
# Schemas match
print(result.c.keys())
print(joined.fields)
assert list(result.c.keys()) == list(joined.fields)
def test_unary_op():
assert str(compute(exp(t['amount']), s, post_compute=False)) == \
str(sa.func.exp(s.c.amount))
assert str(compute(-t['amount'], s, post_compute=False)) == \
str(-s.c.amount)
@pytest.mark.parametrize('unbiased', [True, False])
def test_std(unbiased):
assert str(compute(t.amount.std(unbiased=unbiased), s, post_compute=False)) == \
str(getattr(sa.func,
'stddev_%s' % ('samp' if unbiased else 'pop'))(s.c.amount))
@pytest.mark.parametrize('unbiased', [True, False])
def test_var(unbiased):
assert str(compute(t.amount.var(unbiased=unbiased), s, post_compute=False)) == \
str(getattr(sa.func,
'var_%s' % ('samp' if unbiased else 'pop'))(s.c.amount))
def test_reductions():
assert str(compute(sum(t['amount']), s, post_compute=False)) == \
str(sa.sql.functions.sum(s.c.amount))
assert str(compute(mean(t['amount']), s, post_compute=False)) == \
str(sa.sql.func.avg(s.c.amount))
assert str(compute(count(t['amount']), s, post_compute=False)) == \
str(sa.sql.func.count(s.c.amount))
assert 'amount_sum' == compute(
sum(t['amount']), s, post_compute=False).name
def test_reduction_with_invalid_axis_argument():
with pytest.raises(ValueError):
compute(t.amount.mean(axis=1))
with pytest.raises(ValueError):
compute(t.count(axis=1))
with pytest.raises(ValueError):
compute(t[['amount', 'id']].count(axis=1))
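# nelements() with axis=None, axis=0, or axis=(0,) is equivalent to count()
# for tabular data.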
def test_nelements():
rhs = str(compute(t.count(), s))
assert str(compute(t.nelements(), s)) == rhs
assert str(compute(t.nelements(axis=None), s)) == rhs
assert str(compute(t.nelements(axis=0), s)) == rhs
assert str(compute(t.nelements(axis=(0,)), s)) == rhs
@pytest.mark.xfail(raises=Exception, reason="We don't support axis=1 for"
" Record datashapes")
def test_nelements_axis_1():
assert compute(t.nelements(axis=1), s) == len(s.columns)
def test_count_on_table():
result = compute(t.count(), s)
assert normalize(str(result)) == normalize("""
SELECT count(accounts.id) as count_1
FROM accounts""")
result = compute(t[t.amount > 0].count(), s)
assert (
normalize(str(result)) == normalize("""
SELECT count(accounts.id) as count_1
FROM accounts
WHERE accounts.amount > :amount_1""")
or
normalize(str(result)) == normalize("""
SELECT count(alias.id) as count
FROM (SELECT accounts.name AS name, accounts.amount AS amount, accounts.id AS id
FROM accounts
WHERE accounts.amount > :amount_1) as alias"""))
def test_distinct():
result = str(compute(t['amount'].distinct(), s, post_compute=False))
assert 'distinct' in result.lower()
assert 'amount' in result.lower()
print(result)
assert result == str(sa.distinct(s.c.amount))
def test_distinct_multiple_columns():
assert normalize(str(compute(t.distinct(), s))) == normalize("""
SELECT DISTINCT accounts.name, accounts.amount, accounts.id
FROM accounts""")
def test_nunique():
result = str(computefull(nunique(t['amount']), s))
print(result)
assert 'distinct' in result.lower()
assert 'count' in result.lower()
assert 'amount' in result.lower()
def test_nunique_table():
result = normalize(str(computefull(t.nunique(), s)))
expected = normalize("""SELECT count(alias.id) AS tbl_row_count
FROM (SELECT DISTINCT accounts.name AS name, accounts.amount AS amount, accounts.id AS id
FROM accounts) as alias""")
assert result == expected
@xfail(reason="Fails because SQLAlchemy doesn't seem to know binary reductions")
def test_binary_reductions():
assert str(compute(any(t['amount'] > 150), s)) == \
str(sa.sql.functions.any(s.c.amount > 150))
def test_by():
expr = by(t['name'], total=t['amount'].sum())
result = compute(expr, s)
expected = sa.select([s.c.name,
sa.sql.functions.sum(s.c.amount).label('total')]
).group_by(s.c.name)
assert str(result) == str(expected)
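# Note: the accepted translation below applies LIMIT after GROUP BY, i.e. it
# limits the grouped output rather than the first 100 input rows.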
def test_by_head():
t2 = t.head(100)
expr = by(t2['name'], total=t2['amount'].sum())
result = compute(expr, s)
# s2 = select(s).limit(100)
# expected = sa.select([s2.c.name,
# sa.sql.functions.sum(s2.c.amount).label('amount_sum')]
# ).group_by(s2.c.name)
expected = """
SELECT alias.name, sum(alias.amount) as total
FROM (SELECT accounts.name AS name, accounts.amount AS amount, accounts.id AS ID
FROM accounts
LIMIT :param_1) as alias
GROUP BY alias.name"""
expected = """
SELECT accounts.name, sum(accounts.amount) as total
FROM accounts
GROUP by accounts.name
LIMIT :param_1"""
assert normalize(str(result)) == normalize(str(expected))
def test_by_two():
expr = by(tbig[['name', 'sex']], total=tbig['amount'].sum())
result = compute(expr, sbig)
expected = (sa.select([sbig.c.name,
sbig.c.sex,
sa.sql.functions.sum(sbig.c.amount).label('total')])
.group_by(sbig.c.name, sbig.c.sex))
assert str(result) == str(expected)
def test_by_three():
result = compute(by(tbig[['name', 'sex']],
total=(tbig['id'] + tbig['amount']).sum()),
sbig)
assert normalize(str(result)) == normalize("""
SELECT accountsbig.name,
accountsbig.sex,
sum(accountsbig.id + accountsbig.amount) AS total
FROM accountsbig GROUP BY accountsbig.name, accountsbig.sex
""")
def test_by_summary_clean():
expr = by(t.name, min=t.amount.min(), max=t.amount.max())
result = compute(expr, s)
expected = """
SELECT accounts.name, max(accounts.amount) AS max, min(accounts.amount) AS min
FROM accounts
GROUP BY accounts.name
"""
assert normalize(str(result)) == normalize(expected)
def test_by_summary_single_column():
expr = by(t.name, n=t.name.count(), biggest=t.name.max())
result = compute(expr, s)
expected = """
SELECT accounts.name, max(accounts.name) AS biggest, count(accounts.name) AS n
FROM accounts
GROUP BY accounts.name
"""
assert normalize(str(result)) == normalize(expected)
def test_join_projection():
metadata = sa.MetaData()
lhs = sa.Table('amounts', metadata,
sa.Column('name', sa.String),
sa.Column('amount', sa.Integer))
rhs = sa.Table('ids', metadata,
sa.Column('name', sa.String),
sa.Column('id', sa.Integer))
L = symbol('L', 'var * {name: string, amount: int}')
R = symbol('R', 'var * {name: string, id: int}')
want = join(L, R, 'name')[['amount', 'id']]
result = compute(want, {L: lhs, R: rhs})
print(result)
assert 'join' in str(result).lower()
assert result.c.keys() == ['amount', 'id']
assert 'amounts.name = ids.name' in str(result)
def test_sort():
assert str(compute(t.sort('amount'), s)) == \
str(select(s).order_by(sa.asc(s.c.amount)))
assert str(compute(t.sort('amount', ascending=False), s)) == \
str(select(s).order_by(sa.desc(s.c.amount)))
def test_multicolumn_sort():
assert str(compute(t.sort(['amount', 'id']), s)) == \
str(select(s).order_by(sa.asc(s.c.amount), sa.asc(s.c.id)))
assert str(compute(t.sort(['amount', 'id'], ascending=False), s)) == \
str(select(s).order_by(sa.desc(s.c.amount), sa.desc(s.c.id)))
def test_sort_on_distinct():
assert normalize(str(compute(t.amount.sort(), s))) == normalize("""
SELECT accounts.amount
FROM accounts
ORDER BY accounts.amount ASC""")
assert normalize(str(compute(t.amount.distinct().sort(), s))) == normalize("""
SELECT DISTINCT accounts.amount as amount
FROM accounts
ORDER BY amount ASC""")
def test_head():
assert str(compute(t.head(2), s)) == str(select(s).limit(2))
def test_label():
assert str(compute((t['amount'] * 10).label('foo'), s, post_compute=False))\
== str((s.c.amount * 10).label('foo'))
def test_relabel():
result = compute(t.relabel({'name': 'NAME', 'id': 'ID'}), s)
expected = select([s.c.name.label('NAME'), s.c.amount, s.c.id.label('ID')])
assert str(result) == str(expected)
def test_projection_of_selection():
print(compute(t[t['amount'] < 0][['name', 'amount']], s))
assert len(str(compute(t[t['amount'] < 0], s))) > \
len(str(compute(t[t['amount'] < 0][['name', 'amount']], s)))
def test_outer_join():
L = symbol('L', 'var * {id: int, name: string, amount: real}')
R = symbol('R', 'var * {city: string, id: int}')
with tmpfile('db') as fn:
uri = 'sqlite:///' + fn
engine = resource(uri)
_left = [(1, 'Alice', 100),
(2, 'Bob', 200),
(4, 'Dennis', 400)]
left = resource(uri, 'left', dshape=L.dshape)
into(left, _left)
_right = [('NYC', 1),
('Boston', 1),
('LA', 3),
('Moscow', 4)]
right = resource(uri, 'right', dshape=R.dshape)
into(right, _right)
conn = engine.connect()
query = compute(join(L, R, how='inner'),
{L: left, R: right},
post_compute=False)
result = list(map(tuple, conn.execute(query).fetchall()))
assert set(result) == set(
[(1, 'Alice', 100, 'NYC'),
(1, 'Alice', 100, 'Boston'),
(4, 'Dennis', 400, 'Moscow')])
query = compute(join(L, R, how='left'),
{L: left, R: right},
post_compute=False)
result = list(map(tuple, conn.execute(query).fetchall()))
assert set(result) == set(
[(1, 'Alice', 100, 'NYC'),
(1, 'Alice', 100, 'Boston'),
(2, 'Bob', 200, None),
(4, 'Dennis', 400, 'Moscow')])
query = compute(join(L, R, how='right'),
{L: left, R: right},
post_compute=False)
print(query)
result = list(map(tuple, conn.execute(query).fetchall()))
print(result)
assert set(result) == set(
[(1, 'Alice', 100, 'NYC'),
(1, 'Alice', 100, 'Boston'),
(3, None, None, 'LA'),
(4, 'Dennis', 400, 'Moscow')])
# SQLAlchemy doesn't support full outer join
"""
query = compute(join(L, R, how='outer'),
{L: left, R: right},
post_compute=False)
result = list(map(tuple, conn.execute(query).fetchall()))
assert set(result) == set(
[(1, 'Alice', 100, 'NYC'),
(1, 'Alice', 100, 'Boston'),
(2, 'Bob', 200, None),
(3, None, None, 'LA'),
(4, 'Dennis', 400, 'Moscow')])
"""
conn.close()
def test_summary():
expr = summary(a=t.amount.sum(), b=t.id.count())
result = str(compute(expr, s))
assert 'sum(accounts.amount) as a' in result.lower()
assert 'count(accounts.id) as b' in result.lower()
def test_summary_clean():
t2 = t[t.amount > 0]
expr = summary(a=t2.amount.sum(), b=t2.id.count())
result = str(compute(expr, s))
assert normalize(result) == normalize("""
SELECT sum(accounts.amount) as a, count(accounts.id) as b
FROM accounts
WHERE accounts.amount > :amount_1""")
def test_summary_by():
expr = by(t.name, summary(a=t.amount.sum(), b=t.id.count()))
result = str(compute(expr, s))
assert 'sum(accounts.amount) as a' in result.lower()
assert 'count(accounts.id) as b' in result.lower()
assert 'group by accounts.name' in result.lower()
def test_clean_join():
metadata = sa.MetaData()
name = sa.Table('name', metadata,
sa.Column('id', sa.Integer),
sa.Column('name', sa.String),
)
city = sa.Table('place', metadata,
sa.Column('id', sa.Integer),
sa.Column('city', sa.String),
sa.Column('country', sa.String),
)
friends = sa.Table('friends', metadata,
sa.Column('a', sa.Integer),
sa.Column('b', sa.Integer),
)
tcity = symbol('city', discover(city))
tfriends = symbol('friends', discover(friends))
tname = symbol('name', discover(name))
ns = {tname: name, tfriends: friends, tcity: city}
expr = join(tfriends, tname, 'a', 'id')
assert normalize(str(compute(expr, ns))) == normalize("""
SELECT friends.a, friends.b, name.name
FROM friends JOIN name on friends.a = name.id""")
expr = join(join(tfriends, tname, 'a', 'id'), tcity, 'a', 'id')
result = compute(expr, ns)
expected1 = """
SELECT friends.a, friends.b, name.name, place.city, place.country
FROM friends
JOIN name ON friends.a = name.id
JOIN place ON friends.a = place.id
"""
expected2 = """
SELECT alias.a, alias.b, alias.name, place.city, place.country
FROM (SELECT friends.a AS a, friends.b AS b, name.name AS name
FROM friends JOIN name ON friends.a = name.id) AS alias
JOIN place ON alias.a = place.id
"""
assert (normalize(str(result)) == normalize(expected1) or
normalize(str(result)) == normalize(expected2))
def test_like():
expr = t.like(name='Alice*')
assert normalize(str(compute(expr, s))) == normalize("""
SELECT accounts.name, accounts.amount, accounts.id
FROM accounts
WHERE accounts.name LIKE :name_1""")
def test_strlen():
expr = t.name.strlen()
result = str(compute(expr, s))
expected = "SELECT char_length(accounts.name) as name FROM accounts"
assert normalize(result) == normalize(expected)
def test_columnwise_on_complex_selection():
result = str(select(compute(t[t.amount > 0].amount + 1, s)))
assert normalize(result) == \
normalize("""
SELECT accounts.amount + :amount_1 AS amount
FROM accounts
WHERE accounts.amount > :amount_2
""")
def test_reductions_on_complex_selections():
assert normalize(str(select(compute(t[t.amount > 0].id.sum(), s)))) == \
normalize("""
with alias as
(select accounts.id as id
from
accounts
where
accounts.amount > :amount_1)
select sum(alias.id) as id_sum from alias""")
def test_clean_summary_by_where():
t2 = t[t.id == 1]
expr = by(t2.name, sum=t2.amount.sum(), count=t2.amount.count())
result = compute(expr, s)
assert normalize(str(result)) == normalize("""
SELECT accounts.name, count(accounts.amount) AS count, sum(accounts.amount) AS sum
FROM accounts
WHERE accounts.id = :id_1
GROUP BY accounts.name
""")
def test_by_on_count():
expr = by(t.name, count=t.count())
result = compute(expr, s)
assert normalize(str(result)) == normalize("""
SELECT accounts.name, count(accounts.id) AS count
FROM accounts
GROUP BY accounts.name
""")
def test_join_complex_clean():
metadata = sa.MetaData()
name = sa.Table('name', metadata,
sa.Column('id', sa.Integer),
sa.Column('name', sa.String),
)
city = sa.Table('place', metadata,
sa.Column('id', sa.Integer),
sa.Column('city', sa.String),
sa.Column('country', sa.String),
)
tname = symbol('name', discover(name))
tcity = symbol('city', discover(city))
ns = {tname: name, tcity: city}
expr = join(tname[tname.id > 0], tcity, 'id')
result = compute(expr, ns)
expected1 = """
SELECT name.id, name.name, place.city, place.country
FROM name JOIN place ON name.id = place.id
WHERE name.id > :id_1"""
expected2 = """
SELECT alias.id, alias.name, place.city, place.country
FROM (SELECT name.id as id, name.name AS name
FROM name
WHERE name.id > :id_1) AS alias
JOIN place ON alias.id = place.id"""
assert (normalize(str(result)) == normalize(expected1) or
normalize(str(result)) == normalize(expected2))
def test_projection_of_join():
metadata = sa.MetaData()
name = sa.Table('name', metadata,
sa.Column('id', sa.Integer),
sa.Column('name', sa.String),
)
city = sa.Table('place', metadata,
sa.Column('id', sa.Integer),
sa.Column('city', sa.String),
sa.Column('country', sa.String),
)
tname = symbol('name', discover(name))
tcity = symbol('city', discover(city))
expr = join(tname, tcity[tcity.city == 'NYC'], 'id')[['country', 'name']]
ns = {tname: name, tcity: city}
result = compute(expr, ns)
expected1 = """
SELECT place.country, name.name
FROM name JOIN place ON name.id = place.id
WHERE place.city = :city_1"""
expected2 = """
SELECT alias.country, name.name
FROM name
JOIN (SELECT place.id AS id, place.city AS city, place.country AS country
FROM place
WHERE place.city = :city_1) AS alias
ON name.id = alias_6.id"""
assert (normalize(str(result)) == normalize(expected1) or
normalize(str(result)) == normalize(expected2))
def test_lower_column():
metadata = sa.MetaData()
name = sa.Table('name', metadata,
sa.Column('id', sa.Integer),
sa.Column('name', sa.String),
)
city = sa.Table('place', metadata,
sa.Column('id', sa.Integer),
sa.Column('city', sa.String),
sa.Column('country', sa.String),
)
tname = symbol('name', discover(name))
tcity = symbol('city', discover(city))
assert lower_column(name.c.id) is name.c.id
assert lower_column(select(name).c.id) is name.c.id
j = name.join(city, name.c.id == city.c.id)
col = [c for c in j.columns if c.name == 'country'][0]
assert lower_column(col) is city.c.country
def test_selection_of_join():
metadata = sa.MetaData()
name = sa.Table('name', metadata,
sa.Column('id', sa.Integer),
sa.Column('name', sa.String),
)
city = sa.Table('place', metadata,
sa.Column('id', sa.Integer),
sa.Column('city', sa.String),
sa.Column('country', sa.String),
)
tname = symbol('name', discover(name))
tcity = symbol('city', discover(city))
ns = {tname: name, tcity: city}
j = join(tname, tcity, 'id')
expr = j[j.city == 'NYC'].name
result = compute(expr, ns)
assert normalize(str(result)) == normalize("""
SELECT name.name
FROM name JOIN place ON name.id = place.id
WHERE place.city = :city_1""")
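# Self-join: both sides of the joined table are aliased as tab_left /
# tab_right in the generated SQL.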
def test_join_on_same_table():
metadata = sa.MetaData()
T = sa.Table('tab', metadata,
sa.Column('a', sa.Integer),
sa.Column('b', sa.Integer),
)
t = symbol('tab', discover(T))
expr = join(t, t, 'a')
result = compute(expr, {t: T})
assert normalize(str(result)) == normalize("""
SELECT tab_left.a, tab_left.b, tab_right.b
FROM tab AS tab_left JOIN tab AS tab_right
ON tab_left.a = tab_right.a
""")
expr = join(t, t, 'a').b_left.sum()
result = compute(expr, {t: T})
assert normalize(str(result)) == normalize("""
with alias as
(select tab_left.b as b
from tab as tab_left
join tab as tab_right
on tab_left.a = tab_right.a)
select sum(alias.b) as b_left_sum from alias""")
expr = join(t, t, 'a')
expr = summary(total=expr.a.sum(), smallest=expr.b_right.min())
result = compute(expr, {t: T})
assert normalize(str(result)) == normalize("""
SELECT min(tab_right.b) as smallest, sum(tab_left.a) as total
FROM tab AS tab_left JOIN tab AS tab_right
ON tab_left.a = tab_right.a
""")
def test_join_suffixes():
metadata = sa.MetaData()
T = sa.Table('tab', metadata,
sa.Column('a', sa.Integer),
sa.Column('b', sa.Integer),
)
t = symbol('tab', discover(T))
suffixes = '_l', '_r'
expr = join(t, t, 'a', suffixes=suffixes)
result = compute(expr, {t: T})
assert normalize(str(result)) == normalize("""
SELECT tab{l}.a, tab{l}.b, tab{r}.b
FROM tab AS tab{l} JOIN tab AS tab{r}
ON tab{l}.a = tab{r}.a
""".format(l=suffixes[0], r=suffixes[1]))
def test_field_access_on_engines(data):
s, engine = data['s'], data['engine']
result = compute_up(s.city, engine)
assert isinstance(result, sa.Table)
assert result.name == 'city'
def test_computation_directly_on_sqlalchemy_Tables(data):
name = data['name']
s = symbol('s', discover(name))
result = into(list, compute(s.id + 1, name))
assert not isinstance(result, sa.sql.Selectable)
assert list(result) == []
def test_computation_directly_on_metadata(data):
metadata = data['metadata']
name = data['name']
s = symbol('s', discover(metadata))
result = compute(s.name, {s: metadata}, post_compute=False)
assert result == name
sql_bank = sa.Table('bank', sa.MetaData(),
sa.Column('id', sa.Integer),
sa.Column('name', sa.String),
sa.Column('amount', sa.Integer))
sql_cities = sa.Table('cities', sa.MetaData(),
sa.Column('name', sa.String),
sa.Column('city', sa.String))
bank = symbol('bank', discover(sql_bank))
cities = symbol('cities', discover(sql_cities))
def test_aliased_views_with_two_group_bys():
expr = by(bank.name, total=bank.amount.sum())
expr2 = by(expr.total, count=expr.name.count())
result = compute(expr2, {bank: sql_bank, cities: sql_cities})
assert normalize(str(result)) == normalize("""
SELECT alias.total, count(alias.name) as count
FROM (SELECT bank.name AS name, sum(bank.amount) AS total
FROM bank
GROUP BY bank.name) as alias
GROUP BY alias.total
""")
def test_aliased_views_with_join():
joined = join(bank, cities)
expr = by(joined.city, total=joined.amount.sum())
expr2 = by(expr.total, count=expr.city.nunique())
result = compute(expr2, {bank: sql_bank, cities: sql_cities})
assert normalize(str(result)) == normalize("""
SELECT alias.total, count(DISTINCT alias.city) AS count
FROM (SELECT cities.city AS city, sum(bank.amount) AS total
FROM bank
JOIN cities ON bank.name = cities.name
GROUP BY cities.city) as alias
GROUP BY alias.total
""")
def test_select_field_on_alias():
result = compute_up(t.amount, select(s).limit(10).alias('foo'))
assert normalize(str(select(result))) == normalize("""
SELECT foo.amount
FROM (SELECT accounts.name AS name, accounts.amount AS amount, accounts.id AS id
FROM accounts
LIMIT :param_1) as foo""")
@pytest.mark.xfail(raises=Exception,
reason="sqlalchemy.join seems to drop unnecessary tables")
def test_join_on_single_column():
expr = join(cities[['name']], bank)
result = compute(expr, {bank: sql_bank, cities: sql_cities})
assert normalize(str(result)) == normalize("""
SELECT bank.id, bank.name, bank.amount
FROM bank join cities ON bank.name = cities.name""")
expr = join(bank, cities.name)
result = compute(expr, {bank: sql_bank, cities: sql_cities})
assert normalize(str(result)) == normalize("""
SELECT bank.id, bank.name, bank.amount
FROM bank join cities ON bank.name = cities.name""")
def test_aliased_views_more():
metadata = sa.MetaData()
lhs = sa.Table('aaa', metadata,
sa.Column('x', sa.Integer),
sa.Column('y', sa.Integer),
sa.Column('z', sa.Integer))
rhs = sa.Table('bbb', metadata,
sa.Column('w', sa.Integer),
sa.Column('x', sa.Integer),
sa.Column('y', sa.Integer))
L = symbol('L', 'var * {x: int, y: int, z: int}')
R = symbol('R', 'var * {w: int, x: int, y: int}')
expr = join(by(L.x, y_total=L.y.sum()),
R)
result = compute(expr, {L: lhs, R: rhs})
assert normalize(str(result)) == normalize("""
SELECT alias.x, alias.y_total, bbb.w, bbb.y
FROM (SELECT aaa.x as x, sum(aaa.y) as y_total
FROM aaa
GROUP BY aaa.x) AS alias
JOIN bbb ON alias.x = bbb.x """)
expr2 = by(expr.w, count=expr.x.count(), total2=expr.y_total.sum())
result2 = compute(expr2, {L: lhs, R: rhs})
assert (
normalize(str(result2)) == normalize("""
SELECT alias_2.w, count(alias_2.x) as count, sum(alias_2.y_total) as total2
FROM (SELECT alias.x, alias.y_total, bbb.w, bbb.y
FROM (SELECT aaa.x as x, sum(aaa.y) as y_total
FROM aaa
GROUP BY aaa.x) AS alias
JOIN bbb ON alias.x = bbb.x) AS alias_2
GROUP BY alias_2.w""")
or
normalize(str(result2)) == normalize("""
SELECT bbb.w, count(alias.x) as count, sum(alias.y_total) as total2
FROM (SELECT aaa.x as x, sum(aaa.y) as y_total
FROM aaa
GROUP BY aaa.x) as alias
JOIN bbb ON alias.x = bbb.x
GROUP BY bbb.w"""))
def test_aliased_views_with_computation():
engine = sa.create_engine('sqlite:///:memory:')
df_aaa = DataFrame({'x': [1, 2, 3, 2, 3],
'y': [2, 1, 2, 3, 1],
'z': [3, 3, 3, 1, 2]})
df_bbb = DataFrame({'w': [1, 2, 3, 2, 3],
'x': [2, 1, 2, 3, 1],
'y': [3, 3, 3, 1, 2]})
df_aaa.to_sql('aaa', engine)
df_bbb.to_sql('bbb', engine)
metadata = sa.MetaData(engine)
metadata.reflect()
sql_aaa = metadata.tables['aaa']
sql_bbb = metadata.tables['bbb']
L = symbol('aaa', discover(df_aaa))
R = symbol('bbb', discover(df_bbb))
expr = join(by(L.x, y_total=L.y.sum()),
R)
a = compute(expr, {L: df_aaa, R: df_bbb})
b = compute(expr, {L: sql_aaa, R: sql_bbb})
assert into(set, a) == into(set, b)
expr2 = by(expr.w, count=expr.x.count(), total2=expr.y_total.sum())
a = compute(expr2, {L: df_aaa, R: df_bbb})
b = compute(expr2, {L: sql_aaa, R: sql_bbb})
assert into(set, a) == into(set, b)
expr3 = by(expr.x, count=expr.y_total.count())
a = compute(expr3, {L: df_aaa, R: df_bbb})
b = compute(expr3, {L: sql_aaa, R: sql_bbb})
assert into(set, a) == into(set, b)
expr4 = join(expr2, R)
a = compute(expr4, {L: df_aaa, R: df_bbb})
b = compute(expr4, {L: sql_aaa, R: sql_bbb})
assert into(set, a) == into(set, b)
""" # Takes a while
expr5 = by(expr4.count, total=(expr4.x + expr4.y).sum())
a = compute(expr5, {L: df_aaa, R: df_bbb})
b = compute(expr5, {L: sql_aaa, R: sql_bbb})
assert into(set, a) == into(set, b)
"""
def test_distinct_count_on_projection():
expr = t[['amount']].distinct().count()
result = compute(expr, {t: s})
assert (
normalize(str(result)) == normalize("""
SELECT count(DISTINCT accounts.amount)
FROM accounts""")
or
normalize(str(result)) == normalize("""
SELECT count(alias.amount) as count
FROM (SELECT DISTINCT accounts.amount AS amount
FROM accounts) as alias"""))
# note that id is the primary key
expr = t[['amount', 'id']].distinct().count()
result = compute(expr, {t: s})
assert normalize(str(result)) == normalize("""
SELECT count(alias.id) as count
FROM (SELECT DISTINCT accounts.amount AS amount, accounts.id AS id
FROM accounts) as alias""")
def test_join_count():
ds = datashape.dshape(
'{t1: var * {x: int, y: int}, t2: var * {a: int, b: int}}')
engine = resource('sqlite:///:memory:', dshape=ds)
db = symbol('db', ds)
expr = join(db.t1[db.t1.x > -1], db.t2, 'x', 'a').count()
result = compute(expr, {db: engine}, post_compute=False)
expected1 = """
SELECT count(alias.x) as count
FROM (SELECT t1.x AS x, t1.y AS y, t2.b AS b
FROM t1 JOIN t2 ON t1.x = t2.a
WHERE t1.x > ?) as alias
"""
expected2 = """
SELECT count(alias2.x) AS count
FROM (SELECT alias1.x AS x, alias1.y AS y, t2.b AS b
FROM (SELECT t1.x AS x, t1.y AS y
FROM t1
WHERE t1.x > ?) AS alias1
JOIN t2 ON alias1.x = t2.a) AS alias2"""
assert (normalize(str(result)) == normalize(expected1) or
normalize(str(result)) == normalize(expected2))
def test_transform_where():
t2 = t[t.id == 1]
expr = transform(t2, abs_amt=abs(t2.amount), sine=sin(t2.id))
result = compute(expr, s)
expected = """SELECT
accounts.name,
accounts.amount,
accounts.id,
abs(accounts.amount) as abs_amt,
sin(accounts.id) as sine
FROM accounts
WHERE accounts.id = :id_1
"""
assert normalize(str(result)) == normalize(expected)
def test_merge():
col = (t['amount'] * 2).label('new')
expr = merge(t['name'], col)
result = str(compute(expr, s))
assert 'amount * ' in result
assert 'FROM accounts' in result
assert 'SELECT accounts.name' in result
assert 'new' in result
def test_merge_where():
t2 = t[t.id == 1]
expr = merge(t2[['amount', 'name']], t2.id)
result = compute(expr, s)
expected = normalize("""SELECT
accounts.amount,
accounts.name,
accounts.id
FROM accounts
WHERE accounts.id = :id_1
""")
assert normalize(str(result)) == expected
def test_transform_filter_by_single_column():
t2 = t[t.amount < 0]
tr = transform(t2, abs_amt=abs(t2.amount), sine=sin(t2.id))
expr = by(tr.name, avg_amt=tr.abs_amt.mean())
result = compute(expr, s)
expected = normalize("""SELECT
accounts.name,
avg(abs(accounts.amount)) AS avg_amt
FROM accounts
WHERE accounts.amount < :amount_1
GROUP BY accounts.name
""")
assert normalize(str(result)) == expected
def test_transform_filter_by_multiple_columns():
t2 = t[t.amount < 0]
tr = transform(t2, abs_amt=abs(t2.amount), sine=sin(t2.id))
expr = by(tr.name, avg_amt=tr.abs_amt.mean(), sum_sine=tr.sine.sum())
result = compute(expr, s)
expected = normalize("""SELECT
accounts.name,
avg(abs(accounts.amount)) AS avg_amt,
sum(sin(accounts.id)) AS sum_sine
FROM accounts
WHERE accounts.amount < :amount_1
GROUP BY accounts.name
""")
assert normalize(str(result)) == expected
def test_transform_filter_by_different_order():
t2 = transform(t, abs_amt=abs(t.amount), sine=sin(t.id))
tr = t2[t2.amount < 0]
expr = by(tr.name,
avg_amt=tr.abs_amt.mean(),
avg_sine=tr.sine.sum() / tr.sine.count())
result = compute(expr, s)
expected = normalize("""SELECT
accounts.name,
avg(abs(accounts.amount)) AS avg_amt,
sum(sin(accounts.id)) / count(sin(accounts.id)) AS avg_sine
FROM accounts
WHERE accounts.amount < :amount_1
GROUP BY accounts.name
""")
assert normalize(str(result)) == expected
def test_transform_filter_by_projection():
t2 = transform(t, abs_amt=abs(t.amount), sine=sin(t.id))
tr = t2[t2.amount < 0]
expr = by(tr[['name', 'id']],
avg_amt=tr.abs_amt.mean(),
avg_sine=tr.sine.sum() / tr.sine.count())
result = compute(expr, s)
expected = normalize("""SELECT
accounts.name,
accounts.id,
avg(abs(accounts.amount)) AS avg_amt,
sum(sin(accounts.id)) / count(sin(accounts.id)) AS avg_sine
FROM accounts
WHERE accounts.amount < :amount_1
GROUP BY accounts.name, accounts.id
""")
assert normalize(str(result)) == expected
def test_merge_compute():
data = [(1, 'Alice', 100),
(2, 'Bob', 200),
(4, 'Dennis', 400)]
ds = datashape.dshape('var * {id: int, name: string, amount: real}')
s = symbol('s', ds)
with tmpfile('db') as fn:
uri = 'sqlite:///' + fn
into(uri + '::table', data, dshape=ds)
expr = transform(s, amount10=s.amount * 10)
result = into(list, compute(expr, {s: data}))
assert result == [(1, 'Alice', 100, 1000),
(2, 'Bob', 200, 2000),
(4, 'Dennis', 400, 4000)]
def test_head_limit():
assert compute(t.head(5).head(10), s)._limit == 5
assert compute(t.head(10).head(5), s)._limit == 5
assert compute(t.head(10).head(10), s)._limit == 10
def test_no_extraneous_join():
ds = """ {event: var * {name: ?string,
operation: ?string,
datetime_nearest_receiver: ?datetime,
aircraft: ?string,
temperature_2m: ?float64,
temperature_5cm: ?float64,
humidity: ?float64,
windspeed: ?float64,
pressure: ?float64,
include: int64},
operation: var * {name: ?string,
runway: int64,
takeoff: bool,
datetime_nearest_close: ?string}}
"""
db = resource('sqlite:///:memory:', dshape=ds)
d = symbol('db', dshape=ds)
expr = join(d.event[d.event.include == True],
d.operation[['name', 'datetime_nearest_close']],
'operation', 'name')
result = compute(expr, db)
assert normalize(str(result)) == normalize("""
SELECT alias.operation, alias.name, alias.datetime_nearest_receiver,
alias.aircraft, alias.temperature_2m, alias.temperature_5cm,
alias.humidity, alias.windspeed, alias.pressure,
alias.include, alias.datetime_nearest_close
FROM (SELECT event.name AS name,
event.operation AS operation,
event.datetime_nearest_receiver AS datetime_nearest_receiver,
event.aircraft AS aircraft,
event.temperature_2m AS temperature_2m,
event.temperature_5cm AS temperature_5cm,
event.humidity AS humidity,
event.windspeed AS windspeed,
event.pressure AS pressure,
event.include AS include
FROM event WHERE event.include = 1) AS alias1
JOIN (SELECT operation.name AS name,
operation.datetime_nearest_close as datetime_nearest_close
FROM operation) AS alias2
ON alias1.operation = alias2.name
""")
def test_math():
result = compute(sin(t.amount), s)
assert normalize(str(result)) == normalize("""
SELECT sin(accounts.amount) as amount
FROM accounts""")
result = compute(floor(t.amount), s)
assert normalize(str(result)) == normalize("""
SELECT floor(accounts.amount) as amount
FROM accounts""")
result = compute(t.amount // 2, s)
assert normalize(str(result)) == normalize("""
SELECT floor(accounts.amount / :amount_1) AS amount
FROM accounts""")
def test_transform_order():
r = transform(t, sin_amount=sin(t.amount), cos_id=cos(t.id))
result = compute(r, s)
expected = """SELECT
accounts.name,
accounts.amount,
accounts.id,
cos(accounts.id) as cos_id,
sin(accounts.amount) as sin_amount
FROM accounts
"""
assert normalize(str(result)) == normalize(expected)
def test_isin():
result = t[t.name.isin(['foo', 'bar'])]
result_sql_expr = str(compute(result, s))
expected = """
SELECT
accounts.name,
accounts.amount,
accounts.id
FROM
accounts
WHERE
accounts.name
IN
(:name_1,
:name_2)
"""
assert normalize(result_sql_expr) == normalize(expected)
@pytest.mark.skipif('1.0.0' <= LooseVersion(sa.__version__) <= '1.0.1',
reason=("SQLAlchemy generates different code in 1.0.0"
" and 1.0.1"))
def test_date_grouper_repeats_not_one_point_oh():
columns = [sa.Column('amount', sa.REAL),
sa.Column('ds', sa.TIMESTAMP)]
data = sa.Table('t', sa.MetaData(), *columns)
t = symbol('t', discover(data))
expr = by(t.ds.year, avg_amt=t.amount.mean())
result = str(compute(expr, data))
# FYI spark sql isn't able to parse this correctly
expected = """SELECT
EXTRACT(year FROM t.ds) as ds_year,
AVG(t.amount) as avg_amt
FROM t
GROUP BY EXTRACT(year FROM t.ds)
"""
assert normalize(result) == normalize(expected)
@pytest.mark.skipif(LooseVersion(sa.__version__) < '1.0.0' or
LooseVersion(sa.__version__) >= '1.0.2',
reason=("SQLAlchemy generates different code in < 1.0.0 "
"and >= 1.0.2"))
def test_date_grouper_repeats():
columns = [sa.Column('amount', sa.REAL),
sa.Column('ds', sa.TIMESTAMP)]
data = sa.Table('t', sa.MetaData(), *columns)
t = symbol('t', discover(data))
expr = by(t.ds.year, avg_amt=t.amount.mean())
result = str(compute(expr, data))
# FYI spark sql isn't able to parse this correctly
expected = """SELECT
EXTRACT(year FROM t.ds) as ds_year,
AVG(t.amount) as avg_amt
FROM t
GROUP BY ds_year
"""
assert normalize(result) == normalize(expected)
def test_transform_then_project_single_column():
expr = transform(t, foo=t.id + 1)[['foo', 'id']]
result = normalize(str(compute(expr, s)))
expected = normalize("""SELECT
accounts.id + :id_1 as foo,
accounts.id
FROM accounts""")
assert result == expected
def test_transform_then_project():
proj = ['foo', 'id']
expr = transform(t, foo=t.id + 1)[proj]
result = normalize(str(compute(expr, s)))
expected = normalize("""SELECT
accounts.id + :id_1 as foo,
accounts.id
FROM accounts""")
assert result == expected
def test_reduce_does_not_compose():
expr = by(t.name, counts=t.count()).counts.max()
result = str(compute(expr, s))
expected = """WITH alias AS
(SELECT count(accounts.id) AS counts
FROM accounts GROUP BY accounts.name)
SELECT max(alias.counts) AS counts_max
FROM alias"""
assert normalize(result) == normalize(expected)
@pytest.mark.xfail(raises=NotImplementedError)
def test_normalize_reduction():
expr = by(t.name, counts=t.count())
expr = transform(expr, normed_counts=expr.counts / expr.counts.max())
result = str(compute(expr, s))
expected = """WITH alias AS
(SELECT count(accounts.id) AS counts
FROM accounts GROUP BY accounts.name)
SELECT alias.counts / max(alias.counts) AS normed_counts
FROM alias"""
assert normalize(result) == normalize(expected)
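# Grouping by a datetime-derived key must keep the date() call in both the
# SELECT list and the GROUP BY clause.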
def test_do_not_erase_group_by_functions_with_datetime():
t, s = tdate, sdate
expr = by(t[t.amount < 0].occurred_on.date,
avg_amount=t[t.amount < 0].amount.mean())
result = str(compute(expr, s))
expected = """SELECT
date(accdate.occurred_on) as occurred_on_date,
avg(accdate.amount) as avg_amount
FROM
accdate
WHERE
accdate.amount < :amount_1
GROUP BY
date(accdate.occurred_on)
"""
assert normalize(result) == normalize(expected)
def test_not():
expr = t.amount[~t.name.isin(('Billy', 'Bob'))]
result = str(compute(expr, s))
expected = """SELECT
accounts.amount
FROM
accounts
WHERE
accounts.name not in (:name_1, :name_2)
"""
assert normalize(result) == normalize(expected)
def test_slice():
start, stop, step = 50, 100, 1
result = str(compute(t[start:stop], s))
# Verifies that compute is translating the query correctly
assert result == str(select(s).offset(start).limit(stop))
    # Verifies the generated SQL against the expected query
expected = """
SELECT accounts.name, accounts.amount, accounts.id FROM accounts
LIMIT :param_1 OFFSET :param_2
"""
assert normalize(str(result)) == normalize(str(expected))
# Step size of 1 should be alright
compute(t[start:stop:step], s)
@pytest.mark.xfail(raises=ValueError)
def test_slice_step():
start, stop, step = 50, 100, 2
compute(t[start:stop:step], s)
def test_datetime_to_date():
expr = tdate.occurred_on.date
result = str(compute(expr, sdate))
expected = """SELECT
DATE(accdate.occurred_on) as occurred_on_date
FROM
accdate
"""
assert normalize(result) == normalize(expected)
def test_sort_compose():
expr = t.name[:5].sort()
result = compute(expr, s)
expected = """select
anon_1.name
from (select
accounts.name as name
from
accounts
limit :param_1
offset :param_2) as anon_1
order by
anon_1.name asc"""
assert normalize(str(result)) == normalize(expected)
assert (normalize(str(compute(t.sort('name').name[:5], s))) !=
normalize(expected))
def test_coerce():
expr = t.amount.coerce(to='int64')
expected = """SELECT
cast(accounts.amount AS BIGINT) AS amount
FROM accounts"""
result = compute(expr, s)
assert normalize(str(result)) == normalize(expected)
def test_multi_column_by_after_transform():
tbl = transform(t, new_amount=t.amount + 1, one_two=t.amount * 2)
expr = by(tbl[['name', 'one_two']], avg_amt=tbl.new_amount.mean())
result = compute(expr, s)
expected = """SELECT
accounts.name,
accounts.amount * :amount_1 as one_two,
avg(accounts.amount + :amount_2) as avg_amt
FROM
accounts
GROUP BY
accounts.name, accounts.amount * :amount_1
"""
assert normalize(str(result)) == normalize(expected)
def test_multi_column_by_after_transform_and_filter():
tbl = t[t.name == 'Alice']
tbl = transform(tbl, new_amount=tbl.amount + 1, one_two=tbl.amount * 2)
expr = by(tbl[['name', 'one_two']], avg_amt=tbl.new_amount.mean())
result = compute(expr, s)
expected = """SELECT
accounts.name,
accounts.amount * :amount_1 as one_two,
avg(accounts.amount + :amount_2) as avg_amt
FROM
accounts
WHERE
accounts.name = :name_1
GROUP BY
accounts.name, accounts.amount * :amount_1
"""
assert normalize(str(result)) == normalize(expected)
def test_attribute_access_on_transform_filter():
tbl = transform(t, new_amount=t.amount + 1)
expr = tbl[tbl.name == 'Alice'].new_amount
result = compute(expr, s)
expected = """SELECT
accounts.amount + :amount_1 as new_amount
FROM
accounts
WHERE
accounts.name = :name_1
"""
assert normalize(str(result)) == normalize(expected)
def test_attribute_on_filter_transform_groupby():
tbl = t[t.name == 'Alice']
tbl = transform(tbl, new_amount=tbl.amount + 1, one_two=tbl.amount * 2)
gb = by(tbl[['name', 'one_two']], avg_amt=tbl.new_amount.mean())
expr = gb.avg_amt
result = compute(expr, s)
expected = """SELECT
avg(accounts.amount + :amount_1) as avg_amt
FROM
accounts
WHERE
accounts.name = :name_1
GROUP BY
accounts.name, accounts.amount * :amount_2
"""
assert normalize(str(result)) == normalize(expected)
def test_baseball_nested_by():
data = resource('sqlite:///%s' % example('teams.db'))
dshape = discover(data)
d = symbol('d', dshape)
expr = by(d.teams.name,
start_year=d.teams.yearID.min()).start_year.count_values()
result = compute(expr, data, post_compute=False)
expected = """SELECT
anon_1.start_year,
anon_1.count
FROM
(SELECT
alias.start_year as start_year,
count(alias.start_year) as count
FROM
(SELECT
min(teams.yearid) as start_year
FROM teams
GROUP BY teams.name) as alias
GROUP BY alias.start_year) as anon_1 ORDER BY anon_1.count DESC
"""
assert normalize(str(result).replace('"', '')) == normalize(expected)
def test_label_on_filter():
expr = t[t.name == 'Alice'].amount.label('foo').head(2)
result = compute(expr, s)
expected = """SELECT
accounts.amount AS foo
FROM
accounts
WHERE
accounts.name = :name_1
LIMIT :param_1
"""
assert normalize(str(result)) == normalize(expected)
| {
"repo_name": "nkhuyu/blaze",
"path": "blaze/compute/tests/test_sql_compute.py",
"copies": "2",
"size": "53619",
"license": "bsd-3-clause",
"hash": 2504637123533615600,
"line_mean": 30.2465034965,
"line_max": 89,
"alpha_frac": 0.561778474,
"autogenerated": false,
"ratio": 3.517383888743112,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5079162362743112,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import pytest
pymongo = pytest.importorskip('pymongo')
from datetime import datetime
from toolz import pluck, reduceby, groupby
from datashape import Record
from blaze import into, compute, compute_up, discover, dshape, Data
from blaze.compute.mongo import MongoQuery
from blaze.expr import symbol, by, floor, ceil
from blaze.compatibility import xfail
@pytest.fixture(scope='module')
def conn():
try:
return pymongo.MongoClient()
except pymongo.errors.ConnectionFailure:
pytest.skip('No mongo server running')
@pytest.fixture(scope='module')
def db(conn):
return conn.test_db
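# Raw documents shared by the bank fixtures below.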
bank_raw = [{'name': 'Alice', 'amount': 100},
{'name': 'Alice', 'amount': 200},
{'name': 'Bob', 'amount': 100},
{'name': 'Bob', 'amount': 200},
{'name': 'Bob', 'amount': 300}]
@pytest.yield_fixture
def big_bank(db):
data = [{'name': 'Alice', 'amount': 100, 'city': 'New York City'},
{'name': 'Alice', 'amount': 200, 'city': 'Austin'},
{'name': 'Bob', 'amount': 100, 'city': 'New York City'},
{'name': 'Bob', 'amount': 200, 'city': 'New York City'},
{'name': 'Bob', 'amount': 300, 'city': 'San Francisco'}]
coll = db.bigbank
coll = into(coll, data)
try:
yield coll
finally:
coll.drop()
@pytest.yield_fixture
def date_data(db):
n = 3
d = {'name': ['Alice', 'Bob', 'Joe'],
'when': [datetime(2010, 1, 1, i) for i in [1, 2, 3]],
'amount': [100, 200, 300],
'id': [1, 2, 3]}
data = [dict(zip(d.keys(), [d[k][i] for k in d.keys()]))
for i in range(n)]
coll = into(db.date_data, data)
try:
yield coll
finally:
coll.drop()
@pytest.yield_fixture
def bank(db):
coll = db.bank
coll = into(coll, bank_raw)
try:
yield coll
finally:
coll.drop()
@pytest.yield_fixture
def missing_vals(db):
data = [{'x': 1, 'z': 100},
{'x': 2, 'y': 20, 'z': 200},
{'x': 3, 'z': 300},
{'x': 4, 'y': 40}]
coll = db.missing_vals
coll = into(coll, data)
try:
yield coll
finally:
coll.drop()
@pytest.yield_fixture
def points(db):
data = [{'x': 1, 'y': 10, 'z': 100},
{'x': 2, 'y': 20, 'z': 200},
{'x': 3, 'y': 30, 'z': 300},
{'x': 4, 'y': 40, 'z': 400}]
coll = db.points
coll = into(coll, data)
try:
yield coll
finally:
coll.drop()
@pytest.yield_fixture
def events(db):
data = [{'time': datetime(2012, 1, 1, 12, 00, 00), 'x': 1},
{'time': datetime(2012, 1, 2, 12, 00, 00), 'x': 2},
{'time': datetime(2012, 1, 3, 12, 00, 00), 'x': 3}]
coll = db.events
coll = into(coll, data)
try:
yield coll
finally:
coll.drop()
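# Symbols matching the schemas of the fixture collections above.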
t = symbol('t', 'var * {name: string, amount: int}')
bigt = symbol('bigt', 'var * {name: string, amount: int, city: string}')
p = symbol('p', 'var * {x: int, y: int, z: int}')
e = symbol('e', 'var * {time: datetime, x: int}')
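# A MongoQuery over a placeholder collection; compute_up against it exposes the
# generated aggregation pipeline via .query without needing a live server.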
q = MongoQuery('fake', [])
def test_compute_on_db(bank, points):
assert bank.database == points.database
db = bank.database
d = symbol(db.name, discover(db))
assert (compute(d.points.x.sum(), db) ==
sum(x['x'] for x in db.points.find()))
def test_symbol(bank):
assert compute(t, bank) == list(pluck(['name', 'amount'], bank_raw))
def test_projection_one():
assert compute_up(t[['name']], q).query == ({'$project': {'name': 1}},)
def test_head_one():
assert compute_up(t.head(5), q).query == ({'$limit': 5},)
def test_head(bank):
assert len(compute(t.head(2), bank)) == 2
def test_projection(bank):
assert set(compute(t.name, bank)) == set(['Alice', 'Bob'])
assert set(compute(t[['name']], bank)) == set([('Alice',), ('Bob',)])
def test_selection(bank):
assert set(compute(t[t.name == 'Alice'], bank)) == set([('Alice', 100),
('Alice', 200)])
assert set(compute(t['Alice' == t.name], bank)) == set([('Alice', 100),
('Alice', 200)])
assert set(compute(t[t.amount > 200], bank)) == set([('Bob', 300)])
assert set(compute(t[t.amount >= 200], bank)) == set([('Bob', 300),
('Bob', 200),
('Alice', 200)])
assert set(compute(t[t.name != 'Alice'].name, bank)) == set(['Bob'])
assert set(compute(t[(t.name == 'Alice') & (t.amount > 150)], bank)) == \
set([('Alice', 200)])
assert set(compute(t[(t.name == 'Alice') | (t.amount > 250)], bank)) == \
set([('Alice', 200),
('Alice', 100),
('Bob', 300)])
def test_columnwise(points):
assert set(compute(p.x + p.y, points)) == set([11, 22, 33, 44])
def test_columnwise_multiple_operands(points):
expected = [x['x'] + x['y'] - x['z'] * x['x'] / 2 for x in points.find()]
assert set(compute(p.x + p.y - p.z * p.x / 2, points)) == set(expected)
def test_arithmetic(points):
expr = p.y // p.x
assert set(compute(expr, points)) == set(compute(expr, points.find()))
def test_columnwise_mod(points):
expected = [x['x'] % x['y'] - x['z'] * x['x'] / 2 + 1
for x in points.find()]
expr = p.x % p.y - p.z * p.x / 2 + 1
assert set(compute(expr, points)) == set(expected)
@xfail(raises=NotImplementedError,
reason='MongoDB does not implement certain arith ops')
def test_columnwise_pow(points):
expected = [x['x'] ** x['y'] for x in points.find()]
assert set(compute(p.x ** p.y, points)) == set(expected)
def test_by_one():
assert compute_up(by(t.name, total=t.amount.sum()), q).query == \
({'$group': {'_id': {'name': '$name'},
'total': {'$sum': '$amount'}}},
{'$project': {'total': '$total', 'name': '$_id.name'}})
def test_by(bank):
assert set(compute(by(t.name, total=t.amount.sum()), bank)) == \
set([('Alice', 300), ('Bob', 600)])
assert set(compute(by(t.name, min=t.amount.min()), bank)) == \
set([('Alice', 100), ('Bob', 100)])
assert set(compute(by(t.name, max=t.amount.max()), bank)) == \
set([('Alice', 200), ('Bob', 300)])
assert set(compute(by(t.name, count=t.name.count()), bank)) == \
set([('Alice', 2), ('Bob', 3)])
def test_reductions(bank):
assert compute(t.amount.min(), bank) == 100
assert compute(t.amount.max(), bank) == 300
assert compute(t.amount.sum(), bank) == 900
def test_distinct(bank):
assert set(compute(t.name.distinct(), bank)) == set(['Alice', 'Bob'])
def test_nunique_collection(bank):
assert compute(t.nunique(), bank) == len(bank_raw)
def test_sort(bank):
assert compute(t.amount.sort('amount'), bank) == \
[100, 100, 200, 200, 300]
assert compute(t.amount.sort('amount', ascending=False), bank) == \
[300, 200, 200, 100, 100]
def test_by_multi_column(bank):
assert set(compute(by(t[['name', 'amount']], count=t.count()), bank)) == \
set([(d['name'], d['amount'], 1) for d in bank_raw])
def test_datetime_handling(events):
assert set(compute(e[e.time >= datetime(2012, 1, 2, 12, 0, 0)].x,
events)) == set([2, 3])
assert set(compute(e[e.time >= "2012-01-02"].x,
events)) == set([2, 3])
def test_summary_kwargs(bank):
expr = by(t.name, total=t.amount.sum(), avg=t.amount.mean())
result = compute(expr, bank)
assert result == [('Bob', 200.0, 600), ('Alice', 150.0, 300)]
def test_summary_count(bank):
expr = by(t.name, how_many=t.amount.count())
result = compute(expr, bank)
assert result == [('Bob', 3), ('Alice', 2)]
def test_summary_arith(bank):
expr = by(t.name, add_one_and_sum=(t.amount + 1).sum())
result = compute(expr, bank)
assert result == [('Bob', 603), ('Alice', 302)]
def test_summary_arith_min(bank):
expr = by(t.name, add_one_and_sum=(t.amount + 1).min())
result = compute(expr, bank)
assert result == [('Bob', 101), ('Alice', 101)]
def test_summary_arith_max(bank):
expr = by(t.name, add_one_and_sum=(t.amount + 1).max())
result = compute(expr, bank)
assert result == [('Bob', 301), ('Alice', 201)]
def test_summary_complex_arith(bank):
expr = by(t.name, arith=(100 - t.amount * 2 / 30.0).sum())
result = compute(expr, bank)
reducer = lambda acc, x: (100 - x['amount'] * 2 / 30.0) + acc
expected = reduceby('name', reducer, bank.find(), 0)
assert set(result) == set(expected.items())
def test_summary_complex_arith_multiple(bank):
expr = by(t.name, arith=(100 - t.amount * 2 / 30.0).sum(),
other=t.amount.mean())
result = compute(expr, bank)
reducer = lambda acc, x: (100 - x['amount'] * 2 / 30.0) + acc
expected = reduceby('name', reducer, bank.find(), 0)
mu = reduceby('name', lambda acc, x: acc + x['amount'], bank.find(), 0.0)
values = list(mu.values())
items = expected.items()
counts = groupby('name', bank.find())
items = [x + (float(v) / len(counts[x[0]]),)
for x, v in zip(items, values)]
assert set(result) == set(items)
def test_like(bank):
bank.create_index([('name', pymongo.TEXT)])
expr = t.like(name='*Alice*')
result = compute(expr, bank)
assert set(result) == set((('Alice', 100), ('Alice', 200)))
def test_like_multiple(big_bank):
expr = bigt.like(name='*Bob*', city='*York*')
result = compute(expr, big_bank)
assert set(result) == set((('Bob', 100, 'New York City'),
('Bob', 200, 'New York City')))
def test_like_multiple_no_match(big_bank):
# make sure we aren't OR-ing the matches
expr = bigt.like(name='*York*', city='*Bob*')
result = compute(expr, big_bank)
assert not set(result)
def test_missing_values(missing_vals):
assert discover(missing_vals).subshape[0] == \
dshape('{x: int64, y: ?int64, z: ?int64}')
assert set(compute(p.y, missing_vals)) == set([None, 20, None, 40])
def test_datetime_access(date_data):
t = symbol('t',
'var * {amount: float64, id: int64, name: string, when: datetime}')
py_data = into(list, date_data) # a python version of the collection
for attr in ['day', 'minute', 'second', 'year', 'month']:
assert list(compute(getattr(t.when, attr), date_data)) == \
list(compute(getattr(t.when, attr), py_data))
def test_datetime_access_and_arithmetic(date_data):
t = symbol('t',
'var * {amount: float64, id: int64, name: string, when: datetime}')
py_data = into(list, date_data) # a python version of the collection
expr = t.when.day + t.id
assert list(compute(expr, date_data)) == list(compute(expr, py_data))
def test_floor_ceil(bank):
t = symbol('t', discover(bank))
assert set(compute(200 * floor(t.amount / 200), bank)) == set([0, 200])
assert set(compute(200 * ceil(t.amount / 200), bank)) == set([200, 400])
def test_Data_construct(bank, points):
d = Data('mongodb://localhost/test_db')
assert 'bank' in d.fields
assert 'points' in d.fields
assert isinstance(d.dshape.measure, Record)
def test_Data_construct_with_table(bank):
d = Data('mongodb://localhost/test_db::bank')
assert set(d.fields) == set(('name', 'amount'))
assert int(d.count()) == 5
def test_and_same_key(bank):
expr = t[(t.amount > 100) & (t.amount < 300)]
result = compute(expr, bank)
expected = [('Alice', 200), ('Bob', 200)]
assert result == expected
def test_interactive_dshape_works():
d = Data('mongodb://localhost:27017/test_db::bank',
dshape='var * {name: string, amount: int64}')
assert d.dshape == dshape('var * {name: string, amount: int64}')
@pytest.mark.xfail(raises=TypeError, reason="IsIn not yet implemented")
def test_isin_fails(bank):
expr = t[t.amount.isin([100])]
result = compute(expr, bank)
assert result == compute(t[t.amount == 100], bank)
| {
"repo_name": "dwillmer/blaze",
"path": "blaze/compute/tests/test_mongo_compute.py",
"copies": "1",
"size": "12208",
"license": "bsd-3-clause",
"hash": 578315092615927900,
"line_mean": 29.9063291139,
"line_max": 82,
"alpha_frac": 0.5540629096,
"autogenerated": false,
"ratio": 3.1766848816029145,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9227823419009529,
"avg_score": 0.0005848744386772277,
"num_lines": 395
} |
from __future__ import absolute_import, division, print_function
import pytest
pyspark = pytest.importorskip('pyspark')
import pandas as pd
from blaze import compute, symbol, summary, exp, by, join, merge
from toolz import identity
data = [['Alice', 100, 1],
['Bob', 200, 2],
['Alice', 50, 3]]
data2 = [['Alice', 'Austin'],
['Bob', 'Boston']]
df = pd.DataFrame(data, columns=['name', 'amount', 'id'])
# these fixtures exist only because we need a single session-scoped Spark
# context; otherwise the RDDs would simply be global variables
@pytest.fixture
def rdd(sc):
return sc.parallelize(data)
@pytest.fixture
def rdd2(sc):
return sc.parallelize(data2)
t = symbol('t', 'var * {name: string, amount: int, id: int}')
t2 = symbol('t2', 'var * {name: string, city: string}')
# Web Commons Graph Example data
data_idx = [['A', 1],
['B', 2],
['C', 3]]
data_arc = [[1, 3],
[2, 3],
[3, 1]]
t_idx = symbol('idx', 'var * {name: string, node_id: int32}')
t_arc = symbol('arc', 'var * {node_out: int32, node_id: int32}')
def test_spark_symbol(rdd):
assert compute(t, rdd) == rdd
def test_spark_projection(rdd):
assert compute(t['name'], rdd).collect() == [row[0] for row in data]
def test_spark_multicols_projection(rdd):
result = compute(t[['amount', 'name']], rdd).collect()
expected = [(100, 'Alice'), (200, 'Bob'), (50, 'Alice')]
print(result)
print(expected)
assert result == expected
inc = lambda x: x + 1
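# Reduction expressions evaluated against both the Spark RDD and the raw Python
# data in test_spark_reductions; results must match (within tolerance for floats).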
reduction_exprs = [
t['amount'].sum(),
t['amount'].min(),
t['amount'].max(),
t['amount'].nunique(),
t['name'].nunique(),
t['amount'].count(),
(t['amount'] > 150).any(),
(t['amount'] > 150).all(),
t['amount'].mean(),
t['amount'].var(),
summary(a=t.amount.sum(), b=t.id.count()),
t['amount'].std()]
def test_spark_reductions(rdd):
for expr in reduction_exprs:
result = compute(expr, rdd)
expected = compute(expr, data)
if not result == expected:
print(result)
print(expected)
if isinstance(result, float):
assert abs(result - expected) < 0.001
else:
assert result == expected
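# Element-wise, selection, and grouping expressions checked for parity between
# the Spark and pure-Python backends.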
exprs = [
t['amount'],
t['amount'] == 100,
t['amount'].truncate(150),
t[t['name'] == 'Alice'],
t[t['amount'] == 0],
t[t['amount'] > 150],
t['amount'] + t['id'],
t['amount'] % t['id'],
exp(t['amount']),
by(t['name'], total=t['amount'].sum()),
by(t['name'], total=(t['amount'] + 1).sum()),
(t['amount'] * 1).label('foo'),
t.map(lambda tup: tup[1] + tup[2], 'real'),
t.like(name='Alice'),
t['amount'].apply(identity, 'var * real', splittable=True),
t['amount'].map(inc, 'int')]
def test_spark_basic(rdd):
check_exprs_against_python(exprs, data, rdd)
def check_exprs_against_python(exprs, data, rdd):
any_bad = False
for expr in exprs:
result = compute(expr, rdd).collect()
expected = list(compute(expr, data))
if not result == expected:
any_bad = True
print("Expression:", expr)
print("Spark:", result)
print("Python:", expected)
assert not any_bad
def test_spark_big_by(sc):
tbig = symbol(
'tbig', 'var * {name: string, sex: string[1], amount: int, id: int}')
big_exprs = [
by(tbig[['name', 'sex']], total=tbig['amount'].sum()),
by(tbig[['name', 'sex']], total=(tbig['id'] + tbig['amount']).sum())]
databig = [['Alice', 'F', 100, 1],
['Alice', 'F', 100, 3],
['Drew', 'F', 100, 4],
['Drew', 'M', 100, 5],
['Drew', 'M', 200, 5]]
rddbig = sc.parallelize(databig)
check_exprs_against_python(big_exprs, databig, rddbig)
def test_head(rdd):
assert list(compute(t.head(1), rdd)) == list(compute(t.head(1), data))
def test_sort(rdd):
check_exprs_against_python([
t.sort('amount'),
t.sort('amount', ascending=True),
t.sort(t['amount'], ascending=True),
t.sort(-t['amount'].label('foo') + 1, ascending=True),
t.sort(['amount', 'id'])], data, rdd)
def test_distinct(rdd):
assert set(compute(t['name'].distinct(), rdd).collect()) == \
set(['Alice', 'Bob'])
@pytest.mark.xfail(
raises=NotImplementedError,
reason='cannot specify columns to distinct on yet',
)
def test_distinct_on(rdd):
compute(t.distinct('name'), rdd)
def test_join(rdd, rdd2):
joined = join(t, t2, 'name')
expected = [('Alice', 100, 1, 'Austin'),
('Bob', 200, 2, 'Boston'),
('Alice', 50, 3, 'Austin')]
result = compute(joined, {t: rdd, t2: rdd2}).collect()
assert all(i in expected for i in result)
def test_multi_column_join(sc):
left = [(1, 2, 3),
(2, 3, 4),
(1, 3, 5)]
right = [(1, 2, 30),
(1, 3, 50),
(1, 3, 150)]
rleft = sc.parallelize(left)
rright = sc.parallelize(right)
L = symbol('L', 'var * {x: int, y: int, z: int}')
R = symbol('R', 'var * {x: int, y: int, w: int}')
j = join(L, R, ['x', 'y'])
result = compute(j, {L: rleft, R: rright})
expected = [(1, 2, 3, 30),
(1, 3, 5, 50),
(1, 3, 5, 150)]
assert set(result.collect()) == set(expected)
def test_groupby(sc):
rddidx = sc.parallelize(data_idx)
rddarc = sc.parallelize(data_arc)
joined = join(t_arc, t_idx, "node_id")
t = by(joined['name'], count=joined['node_id'].count())
a = compute(t, {t_arc: rddarc, t_idx: rddidx})
in_degree = dict(a.collect())
assert in_degree == {'A': 1, 'C': 2}
def test_multi_level_rowfunc_works(rdd):
expr = t['amount'].map(lambda x: x + 1, 'int')
assert compute(expr, rdd).collect() == [x[1] + 1 for x in data]
def test_merge(rdd):
col = (t['amount'] * 2).label('new')
expr = merge(t['name'], col)
assert compute(expr, rdd).collect() == [
(row[0], row[1] * 2) for row in data]
def test_selection_out_of_order(rdd):
expr = t['name'][t['amount'] < 100]
assert compute(expr, rdd).collect() == ['Alice']
def test_recursive_rowfunc_is_used(rdd):
expr = by(t['name'], total=(2 * (t['amount'] + t['id'])).sum())
expected = [('Alice', 2 * (101 + 53)),
('Bob', 2 * (202))]
assert set(compute(expr, rdd).collect()) == set(expected)
def test_outer_join(sc):
left = [(1, 'Alice', 100),
(2, 'Bob', 200),
(4, 'Dennis', 400)]
left = sc.parallelize(left)
right = [('NYC', 1),
('Boston', 1),
('LA', 3),
('Moscow', 4)]
right = sc.parallelize(right)
L = symbol('L', 'var * {id: int, name: string, amount: real}')
R = symbol('R', 'var * {city: string, id: int}')
assert set(compute(join(L, R), {L: left, R: right}).collect()) == set(
[(1, 'Alice', 100, 'NYC'),
(1, 'Alice', 100, 'Boston'),
(4, 'Dennis', 400, 'Moscow')])
assert set(compute(join(L, R, how='left'), {L: left, R: right}).collect()) == set(
[(1, 'Alice', 100, 'NYC'),
(1, 'Alice', 100, 'Boston'),
(2, 'Bob', 200, None),
(4, 'Dennis', 400, 'Moscow')])
assert set(compute(join(L, R, how='right'), {L: left, R: right}).collect()) == set(
[(1, 'Alice', 100, 'NYC'),
(1, 'Alice', 100, 'Boston'),
(3, None, None, 'LA'),
(4, 'Dennis', 400, 'Moscow')])
# Full outer join not yet supported
assert set(compute(join(L, R, how='outer'), {L: left, R: right}).collect()) == set(
[(1, 'Alice', 100, 'NYC'),
(1, 'Alice', 100, 'Boston'),
(2, 'Bob', 200, None),
(3, None, None, 'LA'),
(4, 'Dennis', 400, 'Moscow')])
| {
"repo_name": "LiaoPan/blaze",
"path": "blaze/compute/tests/test_spark.py",
"copies": "3",
"size": "7842",
"license": "bsd-3-clause",
"hash": -5265908584477134000,
"line_mean": 26.0413793103,
"line_max": 87,
"alpha_frac": 0.5248661056,
"autogenerated": false,
"ratio": 3.11067036890123,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0009727507706104615,
"num_lines": 290
} |
from __future__ import absolute_import, division, print_function
import pytest
pytest.importorskip('dill')
from toolz import (merge, join, pipe, filter, identity, merge_with, take,
partial)
import math
from dask.bag.core import (Bag, lazify, lazify_task, fuse, map, collect,
reduceby, bz2_stream, stream_decompress)
from dask.utils import filetexts, tmpfile, raises
import dask
from pbag import PBag
import dask.bag as db
import shutil
import os
import gzip
import bz2
from collections import Iterator
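# A hand-built dask graph with three partitions, each holding range(5); L is its
# flattened, concrete equivalent.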
dsk = {('x', 0): (range, 5),
('x', 1): (range, 5),
('x', 2): (range, 5)}
L = list(range(5)) * 3
b = Bag(dsk, 'x', 3)
def inc(x):
return x + 1
def iseven(x):
return x % 2 == 0
def isodd(x):
return x % 2 == 1
def add(x, y):
return x + y
def test_Bag():
assert b.name == 'x'
assert b.npartitions == 3
def test_keys():
assert sorted(b._keys()) == sorted(dsk.keys())
def test_map():
c = b.map(inc)
expected = merge(dsk, dict(((c.name, i), (list, (map, inc, (b.name, i))))
for i in range(b.npartitions)))
assert c.dask == expected
def test_map_function_with_multiple_arguments():
b = db.from_sequence([(1, 10), (2, 20), (3, 30)], npartitions=3)
assert list(b.map(lambda x, y: x + y)) == [11, 22, 33]
def test_filter():
c = b.filter(iseven)
expected = merge(dsk, dict(((c.name, i),
(list, (filter, iseven, (b.name, i))))
for i in range(b.npartitions)))
assert c.dask == expected
def test_iter():
assert sorted(list(b)) == sorted(L)
assert sorted(list(b.map(inc))) == sorted(list(range(1, 6)) * 3)
def test_pluck():
d = {('x', 0): [(1, 10), (2, 20)],
('x', 1): [(3, 30), (4, 40)]}
b = Bag(d, 'x', 2)
assert set(b.pluck(0)) == set([1, 2, 3, 4])
assert set(b.pluck(1)) == set([10, 20, 30, 40])
assert set(b.pluck([1, 0])) == set([(10, 1), (20, 2), (30, 3), (40, 4)])
def test_pluck_with_default():
b = db.from_sequence(['Hello', '', 'World'])
assert raises(IndexError, lambda: list(b.pluck(0)))
assert list(b.pluck(0, None)) == ['H', None, 'W']
def test_fold_computation():
assert int(b.fold(add)) == sum(L)
def test_distinct():
assert sorted(b.distinct()) == [0, 1, 2, 3, 4]
def test_frequencies():
assert dict(list(b.frequencies())) == {0: 3, 1: 3, 2: 3, 3: 3, 4: 3}
def test_topk():
assert list(b.topk(4)) == [4, 4, 4, 3]
assert list(b.topk(4, key=lambda x: -x)) == [0, 0, 0, 1]
def test_lambdas():
assert list(b.map(lambda x: x + 1)) == list(b.map(inc))
def test_reductions():
assert int(b.count()) == 15
assert int(b.sum()) == 30
assert int(b.max()) == 4
assert int(b.min()) == 0
assert int(b.any()) == True
assert int(b.all()) == False # some zeros exist
def test_mean():
assert b.mean().compute(get=dask.get) == 2.0
assert float(b.mean()) == 2.0
def test_std():
assert b.std().compute(get=dask.get) == math.sqrt(2.0)
assert float(b.std()) == math.sqrt(2.0)
def test_var():
assert b.var().compute(get=dask.get) == 2.0
assert float(b.var()) == 2.0
def test_join():
assert list(b.join([1, 2, 3], on_self=isodd, on_other=iseven)) == \
list(join(iseven, [1, 2, 3], isodd, list(b)))
assert list(b.join([1, 2, 3], isodd)) == \
list(join(isodd, [1, 2, 3], isodd, list(b)))
def test_foldby():
c = b.foldby(iseven, add, 0, add, 0)
assert (reduceby, iseven, add, (b.name, 0), 0) in list(c.dask.values())
assert set(c) == set(reduceby(iseven, lambda acc, x: acc + x, L, 0).items())
c = b.foldby(iseven, lambda acc, x: acc + x)
assert set(c) == set(reduceby(iseven, lambda acc, x: acc + x, L, 0).items())
def test_map_partitions():
assert list(b.map_partitions(len)) == [5, 5, 5]
def test_lazify_task():
task = (sum, (list, (map, inc, [1, 2, 3])))
assert lazify_task(task) == (sum, (map, inc, [1, 2, 3]))
task = (list, (map, inc, [1, 2, 3]))
assert lazify_task(task) == task
a = (list, (map, inc,
(list, (filter, iseven, 'y'))))
b = (list, (map, inc,
(filter, iseven, 'y')))
assert lazify_task(a) == b
f = lambda x: x
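# f is an arbitrary opaque callable; the lazify tests below only care about
# graph structure, not what f does.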
def test_lazify():
a = {'x': (list, (map, inc,
(list, (filter, iseven, 'y')))),
'a': (f, 'x'), 'b': (f, 'x')}
b = {'x': (list, (map, inc,
(filter, iseven, 'y'))),
'a': (f, 'x'), 'b': (f, 'x')}
assert lazify(a) == b
def test_take():
assert list(b.take(2)) == [0, 1]
assert b.take(2) == (0, 1)
assert isinstance(b.take(2, compute=False), Bag)
def test_map_is_lazy():
from dask.bag.core import map
assert isinstance(map(lambda x: x, [1, 2, 3]), Iterator)
def test_can_use_dict_to_make_concrete():
assert isinstance(dict(b.frequencies()), dict)
def test_from_filenames():
with filetexts({'a1.log': 'A\nB', 'a2.log': 'C\nD'}) as fns:
assert set(line.strip() for line in db.from_filenames(fns)) == \
set('ABCD')
assert set(line.strip() for line in db.from_filenames('a*.log')) == \
set('ABCD')
assert raises(ValueError, lambda: db.from_filenames('non-existent-*-path'))
def test_from_filenames_gzip():
b = db.from_filenames(['foo.json.gz', 'bar.json.gz'])
assert (set(b.dask.values()) ==
set([(list, (gzip.open, os.path.abspath('foo.json.gz'))),
(list, (gzip.open, os.path.abspath('bar.json.gz')))]))
def test_from_filenames_bz2():
b = db.from_filenames(['foo.json.bz2', 'bar.json.bz2'])
assert (set(b.dask.values()) ==
set([(list, (bz2.BZ2File, os.path.abspath('foo.json.bz2'))),
(list, (bz2.BZ2File, os.path.abspath('bar.json.bz2')))]))
def test_from_sequence():
b = db.from_sequence([1, 2, 3, 4, 5], npartitions=3)
assert len(b.dask) == 3
assert set(b) == set([1, 2, 3, 4, 5])
def test_from_long_sequence():
L = list(range(1001))
b = db.from_sequence(L)
assert set(b) == set(L)
def test_product():
b2 = b.product(b)
assert b2.npartitions == b.npartitions**2
assert set(b2) == set([(i, j) for i in L for j in L])
x = db.from_sequence([1, 2, 3, 4])
y = db.from_sequence([10, 20, 30])
z = x.product(y)
assert set(z) == set([(i, j) for i in [1, 2, 3, 4] for j in [10, 20, 30]])
def test_collect():
a = PBag(identity, 2)
with a:
a.extend([0, 1, 2, 3])
b = PBag(identity, 2)
with b:
b.extend([0, 1, 2, 3])
result = merge(dict(collect(identity, 2, 0, [a, b])),
dict(collect(identity, 2, 1, [a, b])))
assert result == {0: [0, 0],
1: [1, 1],
2: [2, 2],
3: [3, 3]}
def test_groupby():
result = dict(b.groupby(lambda x: x))
assert result == {0: [0, 0 ,0],
1: [1, 1, 1],
2: [2, 2, 2],
3: [3, 3, 3],
4: [4, 4, 4]}
assert b.groupby(lambda x: x).npartitions == b.npartitions
def test_groupby_with_indexer():
b = db.from_sequence([[1, 2, 3], [1, 4, 9], [2, 3, 4]])
result = dict(b.groupby(0))
assert result == {1: [[1, 2, 3], [1, 4, 9]],
2: [[2, 3, 4]]}
def test_groupby_with_npartitions_changed():
result = b.groupby(lambda x: x, npartitions=1)
assert dict(result) == {0: [0, 0 ,0],
1: [1, 1, 1],
2: [2, 2, 2],
3: [3, 3, 3],
4: [4, 4, 4]}
assert result.npartitions == 1
def test_concat_method():
b = db.from_sequence([1, 2, 3]).map(lambda x: x * [1, 2, 3])
assert list(b.concat()) == [1, 2, 3] * sum([1, 2, 3])
def test_args():
c = b.map(lambda x: x + 1)
d = Bag(*c._args)
assert list(c) == list(d)
assert c.npartitions == d.npartitions
def test_to_dataframe():
try:
import dask.dataframe
import pandas as pd
except ImportError:
return
b = db.from_sequence([(1, 2), (10, 20), (100, 200)], npartitions=2)
df = b.to_dataframe()
assert list(df.columns) == list(pd.DataFrame(list(b)).columns)
df = b.to_dataframe(columns=['a', 'b'])
assert df.npartitions == b.npartitions
assert list(df.columns) == ['a', 'b']
assert df.a.compute().values.tolist() == list(b.pluck(0))
assert df.b.compute().values.tolist() == list(b.pluck(1))
b = db.from_sequence([{'a': 1, 'b': 2},
{'a': 10, 'b': 20},
{'a': 100, 'b': 200}], npartitions=2)
df2 = b.to_dataframe()
assert (df2.compute().values == df.compute().values).all()
def test_to_textfiles():
b = db.from_sequence(['abc', '123', 'xyz'], npartitions=2)
for ext, myopen in [('gz', gzip.open), ('bz2', bz2.BZ2File), ('', open)]:
c = b.to_textfiles('_foo/*.' + ext)
assert c.npartitions == b.npartitions
try:
c.compute(get=dask.get)
assert os.path.exists('_foo/1.' + ext)
f = myopen('_foo/1.' + ext, 'r')
text = f.read()
if hasattr(text, 'decode'):
text = text.decode()
assert 'xyz' in text
f.close()
finally:
shutil.rmtree('_foo')
def test_to_textfiles_inputs():
B = db.from_sequence(['abc', '123', 'xyz'], npartitions=2)
with tmpfile() as a:
with tmpfile() as b:
B.to_textfiles([a, b]).compute()
assert os.path.exists(a)
assert os.path.exists(b)
with tmpfile() as dirname:
B.to_textfiles(dirname).compute()
assert os.path.exists(dirname)
assert os.path.exists(os.path.join(dirname, '0.part'))
assert raises(ValueError, lambda: B.to_textfiles(5))
def test_bz2_stream():
text = '\n'.join(map(str, range(10000)))
compressed = bz2.compress(text.encode())
assert list(take(100, bz2_stream(compressed))) == list(map(str, range(100)))
def test_concat():
a = db.from_sequence([1, 2, 3])
b = db.from_sequence([4, 5, 6])
c = db.concat([a, b])
assert list(c) == [1, 2, 3, 4, 5, 6]
def test_string_namespace():
b = db.from_sequence(['Alice Smith', 'Bob Jones', 'Charlie Smith'],
npartitions=2)
assert 'split' in dir(b.str)
assert 'match' in dir(b.str)
assert list(b.str.lower()) == ['alice smith', 'bob jones', 'charlie smith']
assert list(b.str.split(' ')) == [['Alice', 'Smith'],
['Bob', 'Jones'],
['Charlie', 'Smith']]
assert list(b.str.match('*Smith')) == ['Alice Smith', 'Charlie Smith']
assert raises(AttributeError, lambda: b.str.sfohsofhf)
def test_stream_decompress():
data = 'abc\ndef\n123'.encode()
assert [s.strip() for s in stream_decompress('', data)] == \
['abc', 'def', '123']
assert [s.strip() for s in stream_decompress('bz2', bz2.compress(data))] == \
['abc', 'def', '123']
with tmpfile() as fn:
f = gzip.open(fn, 'wb')
f.write(data)
f.close()
with open(fn, 'rb') as f:
compressed = f.read()
assert [s.strip() for s in stream_decompress('gz', compressed)] == \
[b'abc', b'def', b'123']
| {
"repo_name": "esc/dask",
"path": "dask/bag/tests/test_bag.py",
"copies": "1",
"size": "11489",
"license": "bsd-3-clause",
"hash": -8498418526715922000,
"line_mean": 27.57960199,
"line_max": 81,
"alpha_frac": 0.521107146,
"autogenerated": false,
"ratio": 2.9428790983606556,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8939825380509389,
"avg_score": 0.004832172770253426,
"num_lines": 402
} |
from __future__ import absolute_import, division, print_function
import pytest
pytest.importorskip('dill')
from toolz import (merge, join, pipe, filter, identity, merge_with, take,
partial, valmap)
import math
from dask.bag.core import (Bag, lazify, lazify_task, fuse, map, collect,
reduceby, bz2_stream, stream_decompress, reify, partition,
_parse_s3_URI, inline_singleton_lists, optimize)
from dask.utils import filetexts, tmpfile, raises
from dask.async import get_sync
import dask
import dask.bag as db
import shutil
import os
import gzip
import bz2
import partd
from tempfile import mkdtemp
from collections import Iterator
dsk = {('x', 0): (range, 5),
('x', 1): (range, 5),
('x', 2): (range, 5)}
L = list(range(5)) * 3
b = Bag(dsk, 'x', 3)
def inc(x):
return x + 1
def iseven(x):
return x % 2 == 0
def isodd(x):
return x % 2 == 1
def add(x, y):
return x + y
def test_Bag():
assert b.name == 'x'
assert b.npartitions == 3
def test_keys():
assert sorted(b._keys()) == sorted(dsk.keys())
def test_map():
c = b.map(inc)
expected = merge(dsk, dict(((c.name, i), (reify, (map, inc, (b.name, i))))
for i in range(b.npartitions)))
assert c.dask == expected
def test_map_function_with_multiple_arguments():
b = db.from_sequence([(1, 10), (2, 20), (3, 30)], npartitions=3)
assert list(b.map(lambda x, y: x + y).compute(get=dask.get)) == [11, 22, 33]
def test_filter():
c = b.filter(iseven)
expected = merge(dsk, dict(((c.name, i),
(reify, (filter, iseven, (b.name, i))))
for i in range(b.npartitions)))
assert c.dask == expected
def test_remove():
assert list(b.remove(lambda x: x % 2 == 0)) == [1, 3] * 3
def test_iter():
assert sorted(list(b)) == sorted(L)
assert sorted(list(b.map(inc))) == sorted(list(range(1, 6)) * 3)
def test_pluck():
d = {('x', 0): [(1, 10), (2, 20)],
('x', 1): [(3, 30), (4, 40)]}
b = Bag(d, 'x', 2)
assert set(b.pluck(0)) == set([1, 2, 3, 4])
assert set(b.pluck(1)) == set([10, 20, 30, 40])
assert set(b.pluck([1, 0])) == set([(10, 1), (20, 2), (30, 3), (40, 4)])
def test_pluck_with_default():
b = db.from_sequence(['Hello', '', 'World'])
assert raises(IndexError, lambda: list(b.pluck(0)))
assert list(b.pluck(0, None)) == ['H', None, 'W']
def test_fold_computation():
assert int(b.fold(add)) == sum(L)
def test_distinct():
assert sorted(b.distinct()) == [0, 1, 2, 3, 4]
def test_frequencies():
assert dict(list(b.frequencies())) == {0: 3, 1: 3, 2: 3, 3: 3, 4: 3}
def test_topk():
assert list(b.topk(4)) == [4, 4, 4, 3]
assert list(b.topk(4, key=lambda x: -x).compute(get=dask.get)) == \
[0, 0, 0, 1]
def test_topk_with_non_callable_key():
b = db.from_sequence([(1, 10), (2, 9), (3, 8)], npartitions=2)
assert list(b.topk(2, key=1)) == [(1, 10), (2, 9)]
assert list(b.topk(2, key=0)) == [(3, 8), (2, 9)]
def test_topk_with_multiarg_lambda():
b = db.from_sequence([(1, 10), (2, 9), (3, 8)], npartitions=2)
assert list(b.topk(2, key=lambda a, b: b)) == [(1, 10), (2, 9)]
def test_lambdas():
assert list(b.map(lambda x: x + 1)) == list(b.map(inc))
def test_reductions():
assert int(b.count()) == 15
assert int(b.sum()) == 30
assert int(b.max()) == 4
assert int(b.min()) == 0
assert int(b.any()) == True
assert int(b.all()) == False # some zeros exist
def test_mean():
assert b.mean().compute(get=dask.get) == 2.0
assert float(b.mean()) == 2.0
def test_std():
assert b.std().compute(get=dask.get) == math.sqrt(2.0)
assert float(b.std()) == math.sqrt(2.0)
def test_var():
assert b.var().compute(get=dask.get) == 2.0
assert float(b.var()) == 2.0
def test_join():
assert list(b.join([1, 2, 3], on_self=isodd, on_other=iseven)) == \
list(join(iseven, [1, 2, 3], isodd, list(b)))
assert list(b.join([1, 2, 3], isodd)) == \
list(join(isodd, [1, 2, 3], isodd, list(b)))
def test_foldby():
c = b.foldby(iseven, add, 0, add, 0)
assert (reduceby, iseven, add, (b.name, 0), 0) in list(c.dask.values())
assert set(c) == set(reduceby(iseven, lambda acc, x: acc + x, L, 0).items())
c = b.foldby(iseven, lambda acc, x: acc + x)
assert set(c) == set(reduceby(iseven, lambda acc, x: acc + x, L, 0).items())
def test_map_partitions():
assert list(b.map_partitions(len)) == [5, 5, 5]
def test_lazify_task():
task = (sum, (reify, (map, inc, [1, 2, 3])))
assert lazify_task(task) == (sum, (map, inc, [1, 2, 3]))
task = (reify, (map, inc, [1, 2, 3]))
assert lazify_task(task) == task
a = (reify, (map, inc,
(reify, (filter, iseven, 'y'))))
b = (reify, (map, inc,
(filter, iseven, 'y')))
assert lazify_task(a) == b
f = lambda x: x
def test_lazify():
a = {'x': (reify, (map, inc,
(reify, (filter, iseven, 'y')))),
'a': (f, 'x'), 'b': (f, 'x')}
b = {'x': (reify, (map, inc,
(filter, iseven, 'y'))),
'a': (f, 'x'), 'b': (f, 'x')}
assert lazify(a) == b
def test_inline_singleton_lists():
inp = {'b': (list, 'a'),
'c': (f, 'b', 1)}
out = {'c': (f, (list, 'a'), 1)}
assert inline_singleton_lists(inp) == out
out = {'c': (f, 'a' , 1)}
assert optimize(inp, ['c']) == out
inp = {'b': (list, 'a'),
'c': (f, 'b', 1),
'd': (f, 'b', 2)}
assert inline_singleton_lists(inp) == inp
inp = {'b': (4, 5)} # doesn't inline constants
assert inline_singleton_lists(inp) == inp
def test_take():
assert list(b.take(2)) == [0, 1]
assert b.take(2) == (0, 1)
assert isinstance(b.take(2, compute=False), Bag)
def test_map_is_lazy():
from dask.bag.core import map
assert isinstance(map(lambda x: x, [1, 2, 3]), Iterator)
def test_can_use_dict_to_make_concrete():
assert isinstance(dict(b.frequencies()), dict)
@pytest.mark.slow
def test_from_url():
a = db.from_url(['http://google.com', 'http://github.com'])
assert a.npartitions == 2
b = db.from_url('http://raw.githubusercontent.com/ContinuumIO/dask/master/README.rst')
assert b.npartitions == 1
assert b'Dask\n' in b.take(10)
def test_from_filenames():
with filetexts({'a1.log': 'A\nB', 'a2.log': 'C\nD'}) as fns:
assert set(line.strip() for line in db.from_filenames(fns)) == \
set('ABCD')
assert set(line.strip() for line in db.from_filenames('a*.log')) == \
set('ABCD')
assert raises(ValueError, lambda: db.from_filenames('non-existent-*-path'))
def test_from_filenames_gzip():
b = db.from_filenames(['foo.json.gz', 'bar.json.gz'])
assert (set(b.dask.values()) ==
set([(list, (gzip.open, os.path.abspath('foo.json.gz'))),
(list, (gzip.open, os.path.abspath('bar.json.gz')))]))
def test_from_filenames_bz2():
b = db.from_filenames(['foo.json.bz2', 'bar.json.bz2'])
assert (set(b.dask.values()) ==
set([(list, (bz2.BZ2File, os.path.abspath('foo.json.bz2'))),
(list, (bz2.BZ2File, os.path.abspath('bar.json.bz2')))]))
def test_from_filenames_large():
with tmpfile() as fn:
with open(fn, 'w') as f:
f.write('Hello, world!\n' * 100)
b = db.from_filenames(fn, chunkbytes=100)
c = db.from_filenames(fn)
assert len(b.dask) > 5
assert list(b) == list(c)
d = db.from_filenames([fn], chunkbytes=100)
assert list(b) == list(d)
def test_from_filenames_large_gzip():
with tmpfile('gz') as fn:
f = gzip.open(fn, 'wb')
f.write(b'Hello, world!\n' * 100)
f.close()
b = db.from_filenames(fn, chunkbytes=100)
c = db.from_filenames(fn)
assert len(b.dask) > 5
assert list(b) == [s.decode() for s in c]
@pytest.mark.slow
def test_from_s3():
# note we don't test connection modes with aws_access_key and
# aws_secret_key because these are not on travis-ci
boto = pytest.importorskip('boto')
five_tips = (u'total_bill,tip,sex,smoker,day,time,size\n',
u'16.99,1.01,Female,No,Sun,Dinner,2\n',
u'10.34,1.66,Male,No,Sun,Dinner,3\n',
u'21.01,3.5,Male,No,Sun,Dinner,3\n',
u'23.68,3.31,Male,No,Sun,Dinner,2\n')
# test compressed data
e = db.from_s3('tip-data', 't*.gz')
assert e.take(5) == five_tips
    # test with a specific key
b = db.from_s3('tip-data', 't?ps.csv')
assert b.npartitions == 1
# test all keys in bucket
c = db.from_s3('tip-data')
assert c.npartitions == 4
d = db.from_s3('s3://tip-data')
assert d.npartitions == 4
e = db.from_s3('tip-data', 'tips.bz2')
assert e.take(5) == five_tips
def test__parse_s3_URI():
bn, p = _parse_s3_URI('s3://mybucket/mykeys', '*')
assert (bn == 'mybucket') and (p == 'mykeys')
bn, p = _parse_s3_URI('s3://snow/g?obes', '*')
assert (bn == 'snow') and (p == 'g?obes')
bn, p = _parse_s3_URI('s3://tupper/wea*', '*')
assert (bn == 'tupper') and (p == 'wea*')
bn, p = _parse_s3_URI('s3://sand/', 'cast?es')
assert (bn == 'sand') and (p == 'cast?es')
def test_from_sequence():
b = db.from_sequence([1, 2, 3, 4, 5], npartitions=3)
assert len(b.dask) == 3
assert set(b) == set([1, 2, 3, 4, 5])
def test_from_long_sequence():
L = list(range(1001))
b = db.from_sequence(L)
assert set(b) == set(L)
def test_product():
b2 = b.product(b)
assert b2.npartitions == b.npartitions**2
assert set(b2) == set([(i, j) for i in L for j in L])
x = db.from_sequence([1, 2, 3, 4])
y = db.from_sequence([10, 20, 30])
z = x.product(y)
assert set(z) == set([(i, j) for i in [1, 2, 3, 4] for j in [10, 20, 30]])
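# partition() shards the sequence into npartitions partd buckets keyed by the
# grouper (effectively x % 3 here); collect() regroups one bucket into
# (key, values) pairs.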
def test_partition_collect():
with partd.Pickle() as p:
partition(identity, range(6), 3, p)
assert set(p.get(0)) == set([0, 3])
assert set(p.get(1)) == set([1, 4])
assert set(p.get(2)) == set([2, 5])
assert sorted(collect(identity, 0, p, '')) == \
[(0, [0]), (3, [3])]
def test_groupby():
c = b.groupby(lambda x: x)
result = dict(c)
assert result == {0: [0, 0 ,0],
1: [1, 1, 1],
2: [2, 2, 2],
3: [3, 3, 3],
4: [4, 4, 4]}
assert b.groupby(lambda x: x).npartitions == b.npartitions
def test_groupby_with_indexer():
b = db.from_sequence([[1, 2, 3], [1, 4, 9], [2, 3, 4]])
result = dict(b.groupby(0))
assert valmap(sorted, result) == {1: [[1, 2, 3], [1, 4, 9]],
2: [[2, 3, 4]]}
def test_groupby_with_npartitions_changed():
result = b.groupby(lambda x: x, npartitions=1)
result2 = dict(result)
assert result2 == {0: [0, 0 ,0],
1: [1, 1, 1],
2: [2, 2, 2],
3: [3, 3, 3],
4: [4, 4, 4]}
assert result.npartitions == 1
def test_concat():
a = db.from_sequence([1, 2, 3])
b = db.from_sequence([4, 5, 6])
c = db.concat([a, b])
assert list(c) == [1, 2, 3, 4, 5, 6]
b = db.from_sequence([1, 2, 3]).map(lambda x: x * [1, 2, 3])
assert list(b.concat()) == [1, 2, 3] * sum([1, 2, 3])
def test_args():
c = b.map(lambda x: x + 1)
d = Bag(*c._args)
assert list(c) == list(d)
assert c.npartitions == d.npartitions
def test_to_dataframe():
try:
import dask.dataframe
import pandas as pd
except ImportError:
return
b = db.from_sequence([(1, 2), (10, 20), (100, 200)], npartitions=2)
df = b.to_dataframe()
assert list(df.columns) == list(pd.DataFrame(list(b)).columns)
df = b.to_dataframe(columns=['a', 'b'])
assert df.npartitions == b.npartitions
assert list(df.columns) == ['a', 'b']
assert df.a.compute().values.tolist() == list(b.pluck(0))
assert df.b.compute().values.tolist() == list(b.pluck(1))
b = db.from_sequence([{'a': 1, 'b': 2},
{'a': 10, 'b': 20},
{'a': 100, 'b': 200}], npartitions=2)
df2 = b.to_dataframe()
assert (df2.compute().values == df.compute().values).all()
def test_to_textfiles():
b = db.from_sequence(['abc', '123', 'xyz'], npartitions=2)
dir = mkdtemp()
for ext, myopen in [('gz', gzip.open), ('bz2', bz2.BZ2File), ('', open)]:
c = b.to_textfiles(os.path.join(dir, '*.' + ext))
assert c.npartitions == b.npartitions
try:
c.compute(get=dask.get)
assert os.path.exists(os.path.join(dir, '1.' + ext))
f = myopen(os.path.join(dir, '1.' + ext), 'r')
text = f.read()
if hasattr(text, 'decode'):
text = text.decode()
assert 'xyz' in text
f.close()
finally:
if os.path.exists(dir):
shutil.rmtree(dir)
def test_to_textfiles_inputs():
B = db.from_sequence(['abc', '123', 'xyz'], npartitions=2)
with tmpfile() as a:
with tmpfile() as b:
B.to_textfiles([a, b]).compute()
assert os.path.exists(a)
assert os.path.exists(b)
with tmpfile() as dirname:
B.to_textfiles(dirname).compute()
assert os.path.exists(dirname)
assert os.path.exists(os.path.join(dirname, '0.part'))
assert raises(ValueError, lambda: B.to_textfiles(5))
def test_bz2_stream():
text = '\n'.join(map(str, range(10000)))
compressed = bz2.compress(text.encode())
assert (list(take(100, bz2_stream(compressed))) ==
list(map(lambda x: str(x) + '\n', range(100))))
def test_string_namespace():
b = db.from_sequence(['Alice Smith', 'Bob Jones', 'Charlie Smith'],
npartitions=2)
assert 'split' in dir(b.str)
assert 'match' in dir(b.str)
assert list(b.str.lower()) == ['alice smith', 'bob jones', 'charlie smith']
assert list(b.str.split(' ')) == [['Alice', 'Smith'],
['Bob', 'Jones'],
['Charlie', 'Smith']]
assert list(b.str.match('*Smith')) == ['Alice Smith', 'Charlie Smith']
assert raises(AttributeError, lambda: b.str.sfohsofhf)
def test_string_namespace_with_unicode():
b = db.from_sequence([u'Alice Smith', u'Bob Jones', 'Charlie Smith'],
npartitions=2)
assert list(b.str.lower()) == ['alice smith', 'bob jones', 'charlie smith']
def test_str_empty_split():
b = db.from_sequence([u'Alice Smith', u'Bob Jones', 'Charlie Smith'],
npartitions=2)
assert list(b.str.split()) == [['Alice', 'Smith'],
['Bob', 'Jones'],
['Charlie', 'Smith']]
def test_stream_decompress():
data = 'abc\ndef\n123'.encode()
assert [s.strip() for s in stream_decompress('', data)] == \
['abc', 'def', '123']
assert [s.strip() for s in stream_decompress('bz2', bz2.compress(data))] == \
['abc', 'def', '123']
with tmpfile() as fn:
f = gzip.open(fn, 'wb')
f.write(data)
f.close()
with open(fn, 'rb') as f:
compressed = f.read()
assert [s.strip() for s in stream_decompress('gz', compressed)] == \
[b'abc', b'def', b'123']
def test_map_with_iterator_function():
b = db.from_sequence([[1, 2, 3], [4, 5, 6]], npartitions=2)
def f(L):
for x in L:
yield x + 1
c = b.map(f)
assert list(c) == [[2, 3, 4], [5, 6, 7]]
def test_ensure_compute_output_is_concrete():
b = db.from_sequence([1, 2, 3])
result = b.map(lambda x: x + 1).compute()
assert not isinstance(result, Iterator)
| {
"repo_name": "simudream/dask",
"path": "dask/bag/tests/test_bag.py",
"copies": "4",
"size": "16038",
"license": "bsd-3-clause",
"hash": 7355202932834508000,
"line_mean": 28.2664233577,
"line_max": 90,
"alpha_frac": 0.5288065844,
"autogenerated": false,
"ratio": 2.904382470119522,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.003966274181481773,
"num_lines": 548
} |
from __future__ import absolute_import, division, print_function
import pytest
pytest.importorskip('flask')
from base64 import b64encode
import datashape
import numpy as np
from datetime import datetime
from pandas import DataFrame
from toolz import pipe
from odo import odo
from blaze.utils import example
from blaze import discover, symbol, by, CSV, compute, join, into, resource
from blaze.server.client import mimetype
from blaze.server.server import Server, to_tree, from_tree
from blaze.server.serialization import all_formats
accounts = DataFrame([['Alice', 100], ['Bob', 200]],
columns=['name', 'amount'])
cities = DataFrame([['Alice', 'NYC'], ['Bob', 'LA']],
columns=['name', 'city'])
events = DataFrame([[1, datetime(2000, 1, 1, 12, 0, 0)],
[2, datetime(2000, 1, 2, 12, 0, 0)]],
columns=['value', 'when'])
db = resource('sqlite:///' + example('iris.db'))
data = {'accounts': accounts,
'cities': cities,
'events': events,
'db': db}
@pytest.fixture(scope='module')
def server():
s = Server(data, all_formats)
s.app.testing = True
return s
@pytest.yield_fixture
def test(server):
with server.app.test_client() as c:
yield c
def test_datasets(test):
response = test.get('/datashape')
assert response.data.decode('utf-8') == str(discover(data))
@pytest.mark.parametrize('serial', all_formats)
def test_bad_responses(test, serial):
assert 'OK' not in test.post(
'/compute/accounts.{name}'.format(name=serial.name),
data=serial.dumps(500),
).status
assert 'OK' not in test.post(
'/compute/non-existent-table.{name}'.format(name=serial.name),
data=serial.dumps(0),
).status
assert 'OK' not in test.post(
'/compute/accounts.{name}'.format(name=serial.name),
).status
def test_to_from_json():
t = symbol('t', 'var * {name: string, amount: int}')
assert from_tree(to_tree(t)).isidentical(t)
assert from_tree(to_tree(t.amount + 1)).isidentical(t.amount + 1)
def test_to_tree():
t = symbol('t', 'var * {name: string, amount: int32}')
expr = t.amount.sum()
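    # to_tree serializes an expression as nested {'op': ..., 'args': [...]} dicts;
    # here the trailing [0] and False appear to be sum's axis and keepdims arguments.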
expected = {'op': 'sum',
'args': [{'op': 'Field',
'args':
[
{'op': 'Symbol',
'args': [
't',
'var * {name: string, amount: int32}',
None
]
},
'amount'
]
}, [0], False]
}
assert to_tree(expr) == expected
@pytest.mark.parametrize('serial', all_formats)
def test_to_tree_slice(serial):
t = symbol('t', 'var * {name: string, amount: int32}')
expr = t[:5]
expr2 = pipe(expr, to_tree, serial.dumps, serial.loads, from_tree)
assert expr.isidentical(expr2)
def test_to_from_tree_namespace():
t = symbol('t', 'var * {name: string, amount: int32}')
expr = t.name
tree = to_tree(expr, names={t: 't'})
assert tree == {'op': 'Field', 'args': ['t', 'name']}
new = from_tree(tree, namespace={'t': t})
assert new.isidentical(expr)
def test_from_tree_is_robust_to_unnecessary_namespace():
t = symbol('t', 'var * {name: string, amount: int32}')
expr = t.amount + 1
tree = to_tree(expr) # don't use namespace
assert from_tree(tree, {'t': t}).isidentical(expr)
t = symbol('t', discover(data))
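# The /compute endpoint expects a serialized {'expr': <tree>} payload posted with the
# serializer's mimetype header; responses carry at least 'data' and 'names', plus a
# 'datashape' for array-like results.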
@pytest.mark.parametrize('serial', all_formats)
def test_compute(test, serial):
expr = t.accounts.amount.sum()
query = {'expr': to_tree(expr)}
expected = 300
response = test.post(
'/compute',
data=serial.dumps(query),
headers=mimetype(serial)
)
assert 'OK' in response.status
data = serial.loads(response.data)
assert data['data'] == expected
assert data['names'] == ['amount_sum']
@pytest.mark.parametrize('serial', all_formats)
def test_get_datetimes(test, serial):
expr = t.events
query = {'expr': to_tree(expr)}
response = test.post(
'/compute',
data=serial.dumps(query),
headers=mimetype(serial)
)
assert 'OK' in response.status
data = serial.loads(response.data)
ds = datashape.dshape(data['datashape'])
result = into(np.ndarray, data['data'], dshape=ds)
assert into(list, result) == into(list, events)
assert data['names'] == events.columns.tolist()
@pytest.mark.parametrize('serial', all_formats)
def dont_test_compute_with_namespace(test, serial):
query = {'expr': {'op': 'Field',
'args': ['accounts', 'name']}}
expected = ['Alice', 'Bob']
response = test.post(
'/compute',
data=serial.dumps(query),
headers=mimetype(serial)
)
assert 'OK' in response.status
data = serial.loads(response.data)
assert data['data'] == expected
assert data['names'] == ['name']
@pytest.yield_fixture
def iris_server():
iris = CSV(example('iris.csv'))
s = Server(iris, all_formats)
s.app.testing = True
with s.app.test_client() as c:
yield c
iris = CSV(example('iris.csv'))
@pytest.mark.parametrize('serial', all_formats)
def test_compute_with_variable_in_namespace(iris_server, serial):
test = iris_server
t = symbol('t', discover(iris))
pl = symbol('pl', 'float32')
expr = t[t.petal_length > pl].species
tree = to_tree(expr, {pl: 'pl'})
blob = serial.dumps({'expr': tree, 'namespace': {'pl': 5}})
resp = test.post(
'/compute',
data=blob,
headers=mimetype(serial)
)
assert 'OK' in resp.status
data = serial.loads(resp.data)
result = data['data']
expected = list(compute(expr._subs({pl: 5}), {t: iris}))
assert result == expected
assert data['names'] == ['species']
@pytest.mark.parametrize('serial', all_formats)
def test_compute_by_with_summary(iris_server, serial):
test = iris_server
t = symbol('t', discover(iris))
expr = by(
t.species,
max=t.petal_length.max(),
sum=t.petal_width.sum(),
)
tree = to_tree(expr)
blob = serial.dumps({'expr': tree})
resp = test.post(
'/compute',
data=blob,
headers=mimetype(serial)
)
assert 'OK' in resp.status
data = serial.loads(resp.data)
result = DataFrame(data['data']).values
expected = compute(expr, iris).values
np.testing.assert_array_equal(result[:, 0], expected[:, 0])
np.testing.assert_array_almost_equal(result[:, 1:], expected[:, 1:])
assert data['names'] == ['species', 'max', 'sum']
@pytest.mark.parametrize('serial', all_formats)
def test_compute_column_wise(iris_server, serial):
test = iris_server
t = symbol('t', discover(iris))
subexpr = ((t.petal_width / 2 > 0.5) &
(t.petal_length / 2 > 0.5))
expr = t[subexpr]
tree = to_tree(expr)
blob = serial.dumps({'expr': tree})
resp = test.post(
'/compute',
data=blob,
headers=mimetype(serial)
)
assert 'OK' in resp.status
data = serial.loads(resp.data)
result = data['data']
expected = compute(expr, iris)
assert list(map(tuple, result)) == into(list, expected)
assert data['names'] == t.fields
@pytest.mark.parametrize('serial', all_formats)
def test_multi_expression_compute(test, serial):
s = symbol('s', discover(data))
expr = join(s.accounts, s.cities)
resp = test.post(
'/compute',
data=serial.dumps(dict(expr=to_tree(expr))),
headers=mimetype(serial)
)
assert 'OK' in resp.status
respdata = serial.loads(resp.data)
result = respdata['data']
expected = compute(expr, {s: data})
assert list(map(tuple, result)) == into(list, expected)
assert respdata['names'] == expr.fields
@pytest.mark.parametrize('serial', all_formats)
def test_leaf_symbol(test, serial):
query = {'expr': {'op': 'Field', 'args': [':leaf', 'cities']}}
resp = test.post(
'/compute',
data=serial.dumps(query),
headers=mimetype(serial)
)
data = serial.loads(resp.data)
a = data['data']
b = into(list, cities)
assert list(map(tuple, a)) == b
assert data['names'] == cities.columns.tolist()
@pytest.mark.parametrize('serial', all_formats)
def test_sqlalchemy_result(test, serial):
expr = t.db.iris.head(5)
query = {'expr': to_tree(expr)}
response = test.post(
'/compute',
data=serial.dumps(query),
headers=mimetype(serial)
)
assert 'OK' in response.status
data = serial.loads(response.data)
result = data['data']
assert all(isinstance(item, (tuple, list)) for item in result)
assert data['names'] == t.db.iris.fields
def test_server_accepts_non_nonzero_ables():
Server(DataFrame())
@pytest.mark.parametrize('serial', all_formats)
def test_server_can_compute_sqlalchemy_reductions(test, serial):
expr = t.db.iris.petal_length.sum()
query = {'expr': to_tree(expr)}
response = test.post(
'/compute',
data=serial.dumps(query),
headers=mimetype(serial)
)
assert 'OK' in response.status
respdata = serial.loads(response.data)
result = respdata['data']
assert result == odo(compute(expr, {t: data}), int)
assert respdata['names'] == ['petal_length_sum']
@pytest.mark.parametrize('serial', all_formats)
def test_serialization_endpoints(test, serial):
expr = t.db.iris.petal_length.sum()
query = {'expr': to_tree(expr)}
response = test.post(
'/compute',
data=serial.dumps(query),
headers=mimetype(serial)
)
assert 'OK' in response.status
respdata = serial.loads(response.data)
result = respdata['data']
assert result == odo(compute(expr, {t: data}), int)
assert respdata['names'] == ['petal_length_sum']
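# The CORS tests below rely on bokeh's crossdomain decorator; this fixture skips them
# when bokeh is not installed.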
@pytest.fixture
def has_bokeh():
try:
from bokeh.server.crossdomain import crossdomain
except ImportError as e:
pytest.skip(str(e))
@pytest.mark.parametrize('serial', all_formats)
def test_cors_compute(test, serial, has_bokeh):
expr = t.db.iris.petal_length.sum()
res = test.post(
'/compute',
data=serial.dumps(dict(expr=to_tree(expr))),
headers=mimetype(serial)
)
assert res.status_code == 200
assert res.headers['Access-Control-Allow-Origin'] == '*'
assert 'HEAD' in res.headers['Access-Control-Allow-Methods']
assert 'OPTIONS' in res.headers['Access-Control-Allow-Methods']
assert 'POST' in res.headers['Access-Control-Allow-Methods']
# we don't allow gets because we're always sending data
assert 'GET' not in res.headers['Access-Control-Allow-Methods']
@pytest.mark.parametrize('method',
['get',
pytest.mark.xfail('head', raises=AssertionError),
pytest.mark.xfail('options', raises=AssertionError),
pytest.mark.xfail('post', raises=AssertionError)])
def test_cors_datashape(test, method, has_bokeh):
res = getattr(test, method)('/datashape')
assert res.status_code == 200
assert res.headers['Access-Control-Allow-Origin'] == '*'
assert 'HEAD' not in res.headers['Access-Control-Allow-Methods']
assert 'OPTIONS' not in res.headers['Access-Control-Allow-Methods']
assert 'POST' not in res.headers['Access-Control-Allow-Methods']
# we only allow GET requests
assert 'GET' in res.headers['Access-Control-Allow-Methods']
@pytest.fixture(scope='module')
def username():
return 'blaze-dev'
@pytest.fixture(scope='module')
def password():
return 'SecretPassword123'
@pytest.fixture(scope='module')
def server_with_auth(username, password):
def auth(a):
return a and a.username == username and a.password == password
s = Server(data, all_formats, authorization=auth)
s.app.testing = True
return s
@pytest.yield_fixture
def test_with_auth(server_with_auth):
with server_with_auth.app.test_client() as c:
yield c
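# Build the value for an HTTP Basic 'Authorization' header: b'Basic ' + base64('user:password').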
def basic_auth(username, password):
return (
b'Basic ' + b64encode(':'.join((username, password)).encode('utf-8'))
)
@pytest.mark.parametrize('serial', all_formats)
def test_auth(test_with_auth, username, password, serial):
expr = t.accounts.amount.sum()
query = {'expr': to_tree(expr)}
r = test_with_auth.get(
'/datashape',
headers={'authorization': basic_auth(username, password)},
)
assert r.status_code == 200
headers = mimetype(serial)
headers['authorization'] = basic_auth(username, password)
s = test_with_auth.post(
'/compute',
data=serial.dumps(query),
headers=headers,
)
assert s.status_code == 200
u = test_with_auth.get(
'/datashape',
headers={'authorization': basic_auth(username + 'a', password + 'a')},
)
assert u.status_code == 401
headers['authorization'] = basic_auth(username + 'a', password + 'a')
v = test_with_auth.post(
'/compute',
data=serial.dumps(query),
headers=headers,
)
assert v.status_code == 401
@pytest.mark.parametrize('serial', all_formats)
def test_minute_query(test, serial):
expr = t.events.when.minute
query = {'expr': to_tree(expr)}
result = test.post(
'/compute',
headers=mimetype(serial),
data=serial.dumps(query)
)
expected = {
'data': [0, 0],
'names': ['when_minute'],
'datashape': '2 * int64'
}
assert result.status_code == 200
assert expected == serial.loads(result.data)
| {
"repo_name": "jdmcbr/blaze",
"path": "blaze/server/tests/test_server.py",
"copies": "3",
"size": "13833",
"license": "bsd-3-clause",
"hash": -4390652995162344400,
"line_mean": 27.3463114754,
"line_max": 78,
"alpha_frac": 0.5990023856,
"autogenerated": false,
"ratio": 3.5487429451000514,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5647745330700051,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import pytest
pytest.importorskip('flask')
from pandas import DataFrame
from blaze import compute, Data, by, into, discover
from blaze.expr import Expr, symbol, Field
from blaze.dispatch import dispatch
from blaze.server import Server
from blaze.server.client import Client, resource
df = DataFrame([['Alice', 100], ['Bob', 200]],
columns=['name', 'amount'])
df2 = DataFrame([['Charlie', 100], ['Dan', 200]],
columns=['name', 'amount'])
data = {'accounts': df, 'accounts2': df}
server = Server(data)
test = server.app.test_client()
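# Route the Client's HTTP calls through the in-process Flask test client instead of a
# real socket by swapping out the requests module used by blaze.server.client.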
from blaze.server import client
client.requests = test # OMG monkey patching
def test_client():
c = Client('localhost:6363')
assert str(discover(c)) == str(discover(data))
t = symbol('t', discover(c))
expr = t.accounts.amount.sum()
assert compute(expr, c) == 300
assert 'name' in t.accounts.fields
assert isinstance(t.accounts.name, Field)
assert compute(t.accounts.name, c) == ['Alice', 'Bob']
def test_expr_client_interactive():
c = Client('localhost:6363')
t = Data(c)
assert compute(t.accounts.name) == ['Alice', 'Bob']
assert (into(set, compute(by(t.accounts.name, min=t.accounts.amount.min(),
max=t.accounts.amount.max())))
== set([('Alice', 100, 100), ('Bob', 200, 200)]))
def test_compute_client_with_multiple_datasets():
c = resource('blaze://localhost:6363')
s = symbol('s', discover(c))
assert compute(s.accounts.amount.sum() + s.accounts2.amount.sum(),
{s: c}) == 600
def test_resource():
c = resource('blaze://localhost:6363')
assert isinstance(c, Client)
assert str(discover(c)) == str(discover(data))
def test_resource_default_port():
ec = resource('blaze://localhost')
assert str(discover(ec)) == str(discover(data))
def test_resource_non_default_port():
ec = resource('blaze://localhost:6364')
assert ec.url == 'http://localhost:6364'
def test_resource_all_in_one():
ec = resource('blaze://localhost:6363')
assert str(discover(ec)) == str(discover(data))
class CustomExpr(Expr):
__slots__ = '_hash', '_child'
@property
def dshape(self):
return self._child.dshape
@dispatch(CustomExpr, DataFrame)
def compute_up(expr, data, **kwargs):
return data
def test_custom_expressions():
ec = Client('localhost:6363')
t = symbol('t', discover(ec))
assert list(map(tuple, compute(CustomExpr(t.accounts), ec))) == into(list, df)
def test_client_dataset_fails():
with pytest.raises(ValueError):
Data('blaze://localhost::accounts')
with pytest.raises(ValueError):
resource('blaze://localhost::accounts')
def test_client_dataset():
d = Data('blaze://localhost')
assert list(map(tuple, into(list, d.accounts))) == into(list, df)
| {
"repo_name": "jdmcbr/blaze",
"path": "blaze/server/tests/test_client.py",
"copies": "13",
"size": "2942",
"license": "bsd-3-clause",
"hash": 2232242739188173800,
"line_mean": 25.5045045045,
"line_max": 82,
"alpha_frac": 0.6363018355,
"autogenerated": false,
"ratio": 3.489916963226572,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0024791755354725913,
"num_lines": 111
} |
from __future__ import absolute_import, division, print_function
import pytest
pytest.importorskip('numpy')
from operator import add
from tempfile import mkdtemp
import shutil
import os
from toolz import merge
from toolz.curried import identity
import dask
import dask.array as da
from dask.async import get_sync
from dask.array.core import *
from dask.utils import raises, ignoring, tmpfile
inc = lambda x: x + 1
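# same_keys checks that two dask collections build graphs with identical task keys,
# i.e. that equivalent expressions are named deterministically.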
def same_keys(a, b):
def key(k):
if isinstance(k, str):
return (k, -1, -1, -1)
else:
return k
return sorted(a.dask, key=key) == sorted(b.dask, key=key)
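# getem expands an array name into one getarray task per block, keyed by ('name', i, j).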
def test_getem():
assert getem('X', (2, 3), shape=(4, 6)) == \
{('X', 0, 0): (getarray, 'X', (slice(0, 2), slice(0, 3))),
('X', 1, 0): (getarray, 'X', (slice(2, 4), slice(0, 3))),
('X', 1, 1): (getarray, 'X', (slice(2, 4), slice(3, 6))),
('X', 0, 1): (getarray, 'X', (slice(0, 2), slice(3, 6)))}
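# top builds the blocked task graph from einsum-like index patterns: matching output and
# input indices pair blocks one-to-one, a contracted index (e.g. 'ik' from 'ij', 'jk')
# gathers lists of blocks, and an empty output pattern collects every block into one task.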
def test_top():
assert top(inc, 'z', 'ij', 'x', 'ij', numblocks={'x': (2, 2)}) == \
{('z', 0, 0): (inc, ('x', 0, 0)),
('z', 0, 1): (inc, ('x', 0, 1)),
('z', 1, 0): (inc, ('x', 1, 0)),
('z', 1, 1): (inc, ('x', 1, 1))}
assert top(add, 'z', 'ij', 'x', 'ij', 'y', 'ij',
numblocks={'x': (2, 2), 'y': (2, 2)}) == \
{('z', 0, 0): (add, ('x', 0, 0), ('y', 0, 0)),
('z', 0, 1): (add, ('x', 0, 1), ('y', 0, 1)),
('z', 1, 0): (add, ('x', 1, 0), ('y', 1, 0)),
('z', 1, 1): (add, ('x', 1, 1), ('y', 1, 1))}
assert top(dotmany, 'z', 'ik', 'x', 'ij', 'y', 'jk',
numblocks={'x': (2, 2), 'y': (2, 2)}) == \
{('z', 0, 0): (dotmany, [('x', 0, 0), ('x', 0, 1)],
[('y', 0, 0), ('y', 1, 0)]),
('z', 0, 1): (dotmany, [('x', 0, 0), ('x', 0, 1)],
[('y', 0, 1), ('y', 1, 1)]),
('z', 1, 0): (dotmany, [('x', 1, 0), ('x', 1, 1)],
[('y', 0, 0), ('y', 1, 0)]),
('z', 1, 1): (dotmany, [('x', 1, 0), ('x', 1, 1)],
[('y', 0, 1), ('y', 1, 1)])}
assert top(identity, 'z', '', 'x', 'ij', numblocks={'x': (2, 2)}) ==\
{('z',): (identity, [[('x', 0, 0), ('x', 0, 1)],
[('x', 1, 0), ('x', 1, 1)]])}
def test_top_supports_broadcasting_rules():
assert top(add, 'z', 'ij', 'x', 'ij', 'y', 'ij',
numblocks={'x': (1, 2), 'y': (2, 1)}) == \
{('z', 0, 0): (add, ('x', 0, 0), ('y', 0, 0)),
('z', 0, 1): (add, ('x', 0, 1), ('y', 0, 0)),
('z', 1, 0): (add, ('x', 0, 0), ('y', 1, 0)),
('z', 1, 1): (add, ('x', 0, 1), ('y', 1, 0))}
def test_concatenate3():
x = np.array([1, 2])
assert concatenate3([[x, x, x],
[x, x, x]]).shape == (2, 6)
x = np.array([[1, 2]])
assert concatenate3([[x, x, x],
[x, x, x]]).shape == (2, 6)
def test_concatenate3_on_scalars():
assert eq(concatenate3([1, 2]), np.array([1, 2]))
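# eq compares dask arrays against numpy: both sides are computed with the synchronous
# scheduler, dtypes must match as strings, then values are compared with allclose
# (falling back to elementwise equality for non-numeric data).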
def eq(a, b):
if isinstance(a, Array):
adt = a._dtype
a = a.compute(get=dask.get)
else:
adt = getattr(a, 'dtype', None)
if isinstance(b, Array):
bdt = b._dtype
b = b.compute(get=dask.get)
else:
bdt = getattr(b, 'dtype', None)
if not str(adt) == str(bdt):
return False
try:
return np.allclose(a, b)
except TypeError:
pass
c = a == b
if isinstance(c, np.ndarray):
return c.all()
else:
return c
def test_chunked_dot_product():
x = np.arange(400).reshape((20, 20))
o = np.ones((20, 20))
d = {'x': x, 'o': o}
getx = getem('x', (5, 5), shape=(20, 20))
geto = getem('o', (5, 5), shape=(20, 20))
result = top(dotmany, 'out', 'ik', 'x', 'ij', 'o', 'jk',
numblocks={'x': (4, 4), 'o': (4, 4)})
dsk = merge(d, getx, geto, result)
out = dask.get(dsk, [[('out', i, j) for j in range(4)] for i in range(4)])
assert eq(np.dot(x, o), concatenate3(out))
def test_chunked_transpose_plus_one():
x = np.arange(400).reshape((20, 20))
d = {'x': x}
getx = getem('x', (5, 5), shape=(20, 20))
f = lambda x: x.T + 1
comp = top(f, 'out', 'ij', 'x', 'ji', numblocks={'x': (4, 4)})
dsk = merge(d, getx, comp)
out = dask.get(dsk, [[('out', i, j) for j in range(4)] for i in range(4)])
assert eq(concatenate3(out), x.T + 1)
def test_transpose():
x = np.arange(240).reshape((4, 6, 10))
d = da.from_array(x, (2, 3, 4))
assert eq(d.transpose((2, 0, 1)),
x.transpose((2, 0, 1)))
assert same_keys(d.transpose((2, 0, 1)), d.transpose((2, 0, 1)))
def test_broadcast_dimensions_works_with_singleton_dimensions():
argpairs = [('x', 'i')]
numblocks = {'x': ((1,),)}
assert broadcast_dimensions(argpairs, numblocks) == {'i': (1,)}
def test_broadcast_dimensions():
argpairs = [('x', 'ij'), ('y', 'ij')]
d = {'x': ('Hello', 1), 'y': (1, (2, 3))}
assert broadcast_dimensions(argpairs, d) == {'i': 'Hello', 'j': (2, 3)}
def test_Array():
shape = (1000, 1000)
chunks = (100, 100)
name = 'x'
dsk = merge({name: 'some-array'}, getem(name, chunks, shape=shape))
a = Array(dsk, name, chunks, shape=shape)
assert a.numblocks == (10, 10)
assert a._keys() == [[('x', i, j) for j in range(10)]
for i in range(10)]
assert a.chunks == ((100,) * 10, (100,) * 10)
assert a.shape == shape
assert len(a) == shape[0]
def test_uneven_chunks():
a = Array({}, 'x', chunks=(3, 3), shape=(10, 10))
assert a.chunks == ((3, 3, 3, 1), (3, 3, 3, 1))
def test_numblocks_supports_singleton_block_dims():
shape = (100, 10)
chunks = (10, 10)
name = 'x'
dsk = merge({name: 'some-array'}, getem(name, shape=shape, chunks=chunks))
a = Array(dsk, name, chunks, shape=shape)
assert set(concat(a._keys())) == set([('x', i, 0) for i in range(100//10)])
def test_keys():
dsk = dict((('x', i, j), ()) for i in range(5) for j in range(6))
dx = Array(dsk, 'x', chunks=(10, 10), shape=(50, 60))
assert dx._keys() == [[(dx.name, i, j) for j in range(6)]
for i in range(5)]
d = Array({}, 'x', (), shape=())
assert d._keys() == [('x',)]
def test_Array_computation():
a = Array({('x', 0, 0): np.eye(3)}, 'x', shape=(3, 3), chunks=(3, 3))
assert eq(np.array(a), np.eye(3))
assert isinstance(a.compute(), np.ndarray)
assert float(a[0, 0]) == 1
def test_stack():
a, b, c = [Array(getem(name, chunks=(2, 3), shape=(4, 6)),
name, shape=(4, 6), chunks=(2, 3))
for name in 'ABC']
s = stack([a, b, c], axis=0)
colon = slice(None, None, None)
assert s.shape == (3, 4, 6)
assert s.chunks == ((1, 1, 1), (2, 2), (3, 3))
assert s.dask[(s.name, 0, 1, 0)] == (getarray, ('A', 1, 0),
(None, colon, colon))
assert s.dask[(s.name, 2, 1, 0)] == (getarray, ('C', 1, 0),
(None, colon, colon))
assert same_keys(s, stack([a, b, c], axis=0))
s2 = stack([a, b, c], axis=1)
assert s2.shape == (4, 3, 6)
assert s2.chunks == ((2, 2), (1, 1, 1), (3, 3))
assert s2.dask[(s2.name, 0, 1, 0)] == (getarray, ('B', 0, 0),
(colon, None, colon))
assert s2.dask[(s2.name, 1, 1, 0)] == (getarray, ('B', 1, 0),
(colon, None, colon))
assert same_keys(s2, stack([a, b, c], axis=1))
s2 = stack([a, b, c], axis=2)
assert s2.shape == (4, 6, 3)
assert s2.chunks == ((2, 2), (3, 3), (1, 1, 1))
assert s2.dask[(s2.name, 0, 1, 0)] == (getarray, ('A', 0, 1),
(colon, colon, None))
assert s2.dask[(s2.name, 1, 1, 2)] == (getarray, ('C', 1, 1),
(colon, colon, None))
assert same_keys(s2, stack([a, b, c], axis=2))
assert raises(ValueError, lambda: stack([a, b, c], axis=3))
assert set(b.dask.keys()).issubset(s2.dask.keys())
assert stack([a, b, c], axis=-1).chunks == \
stack([a, b, c], axis=2).chunks
def test_short_stack():
x = np.array([1])
d = da.from_array(x, chunks=(1,))
s = da.stack([d])
assert s.shape == (1, 1)
assert Array._get(s.dask, s._keys())[0][0].shape == (1, 1)
def test_stack_scalars():
d = da.arange(4, chunks=2)
s = da.stack([d.mean(), d.sum()])
assert s.compute().tolist() == [np.arange(4).mean(), np.arange(4).sum()]
def test_concatenate():
a, b, c = [Array(getem(name, chunks=(2, 3), shape=(4, 6)),
name, shape=(4, 6), chunks=(2, 3))
for name in 'ABC']
x = concatenate([a, b, c], axis=0)
assert x.shape == (12, 6)
assert x.chunks == ((2, 2, 2, 2, 2, 2), (3, 3))
assert x.dask[(x.name, 0, 1)] == ('A', 0, 1)
assert x.dask[(x.name, 5, 0)] == ('C', 1, 0)
assert same_keys(x, concatenate([a, b, c], axis=0))
y = concatenate([a, b, c], axis=1)
assert y.shape == (4, 18)
assert y.chunks == ((2, 2), (3, 3, 3, 3, 3, 3))
assert y.dask[(y.name, 1, 0)] == ('A', 1, 0)
assert y.dask[(y.name, 1, 5)] == ('C', 1, 1)
assert same_keys(y, concatenate([a, b, c], axis=1))
assert set(b.dask.keys()).issubset(y.dask.keys())
assert concatenate([a, b, c], axis=-1).chunks == \
concatenate([a, b, c], axis=1).chunks
assert raises(ValueError, lambda: concatenate([a, b, c], axis=2))
def test_take():
x = np.arange(400).reshape((20, 20))
a = from_array(x, chunks=(5, 5))
assert eq(np.take(x, 3, axis=0), take(a, 3, axis=0))
assert eq(np.take(x, [3, 4, 5], axis=-1), take(a, [3, 4, 5], axis=-1))
assert raises(ValueError, lambda: take(a, 3, axis=2))
assert same_keys(take(a, [3, 4, 5], axis=-1), take(a, [3, 4, 5], axis=-1))
def test_binops():
a = Array(dict((('a', i), np.array([''])) for i in range(3)),
'a', chunks=((1, 1, 1),))
b = Array(dict((('b', i), np.array([''])) for i in range(3)),
'b', chunks=((1, 1, 1),))
result = elemwise(add, a, b, name='c')
assert result.dask == merge(a.dask, b.dask,
dict((('c', i), (add, ('a', i), ('b', i)))
for i in range(3)))
result = elemwise(pow, a, 2, name='c')
assert result.dask[('c', 0)][1] == ('a', 0)
f = result.dask[('c', 0)][0]
assert f(10) == 100
def test_isnull():
x = np.array([1, np.nan])
a = from_array(x, chunks=(2,))
with ignoring(ImportError):
assert eq(isnull(a), np.isnan(x))
assert eq(notnull(a), ~np.isnan(x))
def test_isclose():
x = np.array([0, np.nan, 1, 1.5])
y = np.array([1e-9, np.nan, 1, 2])
a = from_array(x, chunks=(2,))
b = from_array(y, chunks=(2,))
assert eq(da.isclose(a, b, equal_nan=True),
np.isclose(x, y, equal_nan=True))
def test_broadcast_shapes():
assert (3, 4, 5) == broadcast_shapes((3, 4, 5), (4, 1), ())
assert (3, 4) == broadcast_shapes((3, 1), (1, 4), (4,))
assert (5, 6, 7, 3, 4) == broadcast_shapes((3, 1), (), (5, 6, 7, 1, 4))
assert raises(ValueError, lambda: broadcast_shapes((3,), (3, 4)))
assert raises(ValueError, lambda: broadcast_shapes((2, 3), (2, 3, 1)))
def test_elemwise_on_scalars():
x = np.arange(10)
a = from_array(x, chunks=(5,))
assert len(a._keys()) == 2
assert eq(a.sum()**2, x.sum()**2)
x = np.arange(11)
a = from_array(x, chunks=(5,))
assert len(a._keys()) == 3
assert eq(a, x)
def test_partial_by_order():
f = partial_by_order(add, [(1, 20)])
assert f(5) == 25
assert f.__name__ == 'add(20)'
f = partial_by_order(lambda x, y, z: x + y + z, [(1, 10), (2, 15)])
assert f(3) == 28
assert f.__name__ == '<lambda>(...)'
assert raises(ValueError, lambda: partial_by_order(add, 1))
assert raises(ValueError, lambda: partial_by_order(add, [1]))
def test_elemwise_with_ndarrays():
x = np.arange(3)
y = np.arange(12).reshape(4, 3)
a = from_array(x, chunks=(3,))
b = from_array(y, chunks=(2, 3))
assert eq(x + a, 2 * x)
assert eq(a + x, 2 * x)
assert eq(x + b, x + y)
assert eq(b + x, x + y)
assert eq(a + y, x + y)
assert eq(y + a, x + y)
# Error on shape mismatch
assert raises(ValueError, lambda: a + y.T)
assert raises(ValueError, lambda: a + np.arange(2))
def test_elemwise_differently_chunked():
x = np.arange(3)
y = np.arange(12).reshape(4, 3)
a = from_array(x, chunks=(3,))
b = from_array(y, chunks=(2, 2))
assert eq(a + b, x + y)
assert eq(b + a, x + y)
def test_operators():
x = np.arange(10)
y = np.arange(10).reshape((10, 1))
a = from_array(x, chunks=(5,))
b = from_array(y, chunks=(5, 1))
c = a + 1
assert eq(c, x + 1)
c = a + b
assert eq(c, x + x.reshape((10, 1)))
expr = (3 / a * b)**2 > 5
assert eq(expr, (3 / x * y)**2 > 5)
c = exp(a)
assert eq(c, np.exp(x))
assert eq(abs(-a), a)
assert eq(a, +x)
def test_operator_dtype_promotion():
x = np.arange(10, dtype=np.float32)
y = np.array([1])
a = from_array(x, chunks=(5,))
assert eq(x + 1, a + 1) # still float32
assert eq(x + 1e50, a + 1e50) # now float64
assert eq(x + y, a + y) # also float64
def test_field_access():
x = np.array([(1, 1.0), (2, 2.0)], dtype=[('a', 'i4'), ('b', 'f4')])
y = from_array(x, chunks=(1,))
assert eq(y['a'], x['a'])
assert eq(y[['b', 'a']], x[['b', 'a']])
assert same_keys(y[['b', 'a']], y[['b', 'a']])
def test_tensordot():
x = np.arange(400).reshape((20, 20))
a = from_array(x, chunks=(5, 5))
y = np.arange(200).reshape((20, 10))
b = from_array(y, chunks=(5, 5))
assert eq(tensordot(a, b, axes=1), np.tensordot(x, y, axes=1))
assert eq(tensordot(a, b, axes=(1, 0)), np.tensordot(x, y, axes=(1, 0)))
assert same_keys(tensordot(a, b, axes=(1, 0)), tensordot(a, b, axes=(1, 0)))
assert not same_keys(tensordot(a, b, axes=0), tensordot(a, b, axes=1))
# assert (tensordot(a, a).chunks
# == tensordot(a, a, axes=((1, 0), (0, 1))).chunks)
# assert eq(tensordot(a, a), np.tensordot(x, x))
def test_dot_method():
x = np.arange(400).reshape((20, 20))
a = from_array(x, chunks=(5, 5))
y = np.arange(200).reshape((20, 10))
b = from_array(y, chunks=(5, 5))
assert eq(a.dot(b), x.dot(y))
def test_T():
x = np.arange(400).reshape((20, 20))
a = from_array(x, chunks=(5, 5))
assert eq(x.T, a.T)
def test_norm():
a = np.arange(200, dtype='f8').reshape((20, 10))
b = from_array(a, chunks=(5, 5))
assert eq(b.vnorm(), np.linalg.norm(a))
assert eq(b.vnorm(ord=1), np.linalg.norm(a.flatten(), ord=1))
assert eq(b.vnorm(ord=4, axis=0), np.linalg.norm(a, ord=4, axis=0))
assert b.vnorm(ord=4, axis=0, keepdims=True).ndim == b.ndim
def test_choose():
x = np.random.randint(10, size=(15, 16))
d = from_array(x, chunks=(4, 5))
assert eq(choose(d > 5, [0, d]), np.choose(x > 5, [0, x]))
assert eq(choose(d > 5, [-d, d]), np.choose(x > 5, [-x, x]))
def test_where():
x = np.random.randint(10, size=(15, 16))
d = from_array(x, chunks=(4, 5))
y = np.random.randint(10, size=15)
e = from_array(y, chunks=(4,))
assert eq(where(d > 5, d, 0), np.where(x > 5, x, 0))
assert eq(where(d > 5, d, -e[:, None]), np.where(x > 5, x, -y[:, None]))
def test_where_has_informative_error():
x = da.ones(5, chunks=3)
try:
result = da.where(x > 0)
except Exception as e:
assert 'dask' in str(e)
def test_coarsen():
x = np.random.randint(10, size=(24, 24))
d = from_array(x, chunks=(4, 8))
assert eq(chunk.coarsen(np.sum, x, {0: 2, 1: 4}),
coarsen(np.sum, d, {0: 2, 1: 4}))
assert eq(chunk.coarsen(np.sum, x, {0: 2, 1: 4}),
coarsen(da.sum, d, {0: 2, 1: 4}))
def test_coarsen_with_excess():
x = da.arange(10, chunks=5)
assert eq(coarsen(np.min, x, {0: 3}, trim_excess=True),
np.array([0, 5]))
assert eq(coarsen(np.sum, x, {0: 3}, trim_excess=True),
np.array([0+1+2, 5+6+7]))
def test_insert():
x = np.random.randint(10, size=(10, 10))
a = from_array(x, chunks=(5, 5))
y = np.random.randint(10, size=(5, 10))
b = from_array(y, chunks=(4, 4))
assert eq(np.insert(x, 0, -1, axis=0), insert(a, 0, -1, axis=0))
assert eq(np.insert(x, 3, -1, axis=-1), insert(a, 3, -1, axis=-1))
assert eq(np.insert(x, 5, -1, axis=1), insert(a, 5, -1, axis=1))
assert eq(np.insert(x, -1, -1, axis=-2), insert(a, -1, -1, axis=-2))
assert eq(np.insert(x, [2, 3, 3], -1, axis=1),
insert(a, [2, 3, 3], -1, axis=1))
assert eq(np.insert(x, [2, 3, 8, 8, -2, -2], -1, axis=0),
insert(a, [2, 3, 8, 8, -2, -2], -1, axis=0))
assert eq(np.insert(x, slice(1, 4), -1, axis=1),
insert(a, slice(1, 4), -1, axis=1))
assert eq(np.insert(x, [2] * 3 + [5] * 2, y, axis=0),
insert(a, [2] * 3 + [5] * 2, b, axis=0))
assert eq(np.insert(x, 0, y[0], axis=1),
insert(a, 0, b[0], axis=1))
assert raises(NotImplementedError, lambda: insert(a, [4, 2], -1, axis=0))
assert raises(IndexError, lambda: insert(a, [3], -1, axis=2))
assert raises(IndexError, lambda: insert(a, [3], -1, axis=-3))
assert same_keys(insert(a, [2, 3, 8, 8, -2, -2], -1, axis=0),
insert(a, [2, 3, 8, 8, -2, -2], -1, axis=0))
def test_multi_insert():
z = np.random.randint(10, size=(1, 2))
c = from_array(z, chunks=(1, 2))
assert eq(np.insert(np.insert(z, [0, 1], -1, axis=0), [1], -1, axis=1),
insert(insert(c, [0, 1], -1, axis=0), [1], -1, axis=1))
def test_broadcast_to():
x = np.random.randint(10, size=(5, 1, 6))
a = from_array(x, chunks=(3, 1, 3))
for shape in [(5, 4, 6), (2, 5, 1, 6), (3, 4, 5, 4, 6)]:
assert eq(chunk.broadcast_to(x, shape),
broadcast_to(a, shape))
assert raises(ValueError, lambda: broadcast_to(a, (2, 1, 6)))
assert raises(ValueError, lambda: broadcast_to(a, (3,)))
def test_ravel():
x = np.random.randint(10, size=(4, 6))
# 2d
# these should use the shortcut
for chunks in [(4, 6), (2, 6)]:
a = from_array(x, chunks=chunks)
assert eq(x.ravel(), a.ravel())
assert len(a.ravel().dask) == len(a.dask) + len(a.chunks[0])
# these cannot
for chunks in [(4, 2), (2, 2)]:
a = from_array(x, chunks=chunks)
assert eq(x.ravel(), a.ravel())
assert len(a.ravel().dask) > len(a.dask) + len(a.chunks[0])
# 0d
assert eq(x[0, 0].ravel(), a[0, 0].ravel())
# 1d
a_flat = a.ravel()
assert a_flat.ravel() is a_flat
# 3d
x = np.random.randint(10, size=(2, 3, 4))
for chunks in [2, 4, (2, 3, 2), (1, 3, 4)]:
a = from_array(x, chunks=chunks)
assert eq(x.ravel(), a.ravel())
assert eq(x.flatten(), a.flatten())
assert eq(np.ravel(x), da.ravel(a))
def test_unravel():
x = np.random.randint(10, size=24)
# these should use the shortcut
for chunks, shape in [(24, (3, 8)),
(24, (12, 2)),
(6, (4, 6)),
(6, (4, 3, 2)),
(6, (4, 6, 1)),
(((6, 12, 6),), (4, 6))]:
a = from_array(x, chunks=chunks)
unraveled = unravel(a, shape)
assert eq(x.reshape(*shape), unraveled)
assert len(unraveled.dask) == len(a.dask) + len(a.chunks[0])
# these cannot
for chunks, shape in [(6, (2, 12)),
(6, (1, 4, 6)),
(6, (2, 1, 12))]:
a = from_array(x, chunks=chunks)
unraveled = unravel(a, shape)
assert eq(x.reshape(*shape), unraveled)
assert len(unraveled.dask) > len(a.dask) + len(a.chunks[0])
assert raises(AssertionError, lambda: unravel(unraveled, (3, 8)))
assert unravel(a, a.shape) is a
def test_reshape():
shapes = [(24,), (2, 12), (2, 3, 4)]
for original_shape in shapes:
for new_shape in shapes:
for chunks in [2, 4, 12]:
x = np.random.randint(10, size=original_shape)
a = from_array(x, chunks)
assert eq(x.reshape(new_shape), a.reshape(new_shape))
assert raises(ValueError, lambda: reshape(a, (100,)))
assert eq(x.reshape(*new_shape), a.reshape(*new_shape))
assert eq(np.reshape(x, new_shape), reshape(a, new_shape))
# verify we can reshape a single chunk array without too many tasks
x = np.random.randint(10, size=(10, 20))
a = from_array(x, 20) # all one chunk
reshaped = a.reshape((20, 10))
assert eq(x.reshape((20, 10)), reshaped)
assert len(reshaped.dask) == len(a.dask) + 2
def test_reshape_unknown_dimensions():
for original_shape in [(24,), (2, 12), (2, 3, 4)]:
for new_shape in [(-1,), (2, -1), (-1, 3, 4)]:
x = np.random.randint(10, size=original_shape)
a = from_array(x, 4)
assert eq(x.reshape(new_shape), a.reshape(new_shape))
assert raises(ValueError, lambda: reshape(a, (-1, -1)))
def test_full():
d = da.full((3, 4), 2, chunks=((2, 1), (2, 2)))
assert d.chunks == ((2, 1), (2, 2))
assert eq(d, np.full((3, 4), 2))
def test_map_blocks():
inc = lambda x: x + 1
x = np.arange(400).reshape((20, 20))
d = from_array(x, chunks=(7, 7))
e = d.map_blocks(inc, dtype=d.dtype)
assert d.chunks == e.chunks
assert eq(e, x + 1)
e = d.map_blocks(inc, name='increment')
assert e.name == 'increment'
d = from_array(x, chunks=(10, 10))
e = d.map_blocks(lambda x: x[::2, ::2], chunks=(5, 5), dtype=d.dtype)
assert e.chunks == ((5, 5), (5, 5))
assert eq(e, x[::2, ::2])
d = from_array(x, chunks=(8, 8))
e = d.map_blocks(lambda x: x[::2, ::2], chunks=((4, 4, 2), (4, 4, 2)),
dtype=d.dtype)
assert eq(e, x[::2, ::2])
def test_map_blocks2():
x = np.arange(10, dtype='i8')
d = from_array(x, chunks=(2,))
def func(block, block_id=None):
return np.ones_like(block) * sum(block_id)
out = d.map_blocks(func, dtype='i8')
expected = np.array([0, 0, 1, 1, 2, 2, 3, 3, 4, 4], dtype='i8')
assert eq(out, expected)
assert same_keys(d.map_blocks(func, dtype='i8'), out)
def test_fromfunction():
def f(x, y):
return x + y
d = fromfunction(f, shape=(5, 5), chunks=(2, 2), dtype='f8')
assert eq(d, np.fromfunction(f, shape=(5, 5)))
assert same_keys(d, fromfunction(f, shape=(5, 5), chunks=(2, 2), dtype='f8'))
def test_from_function_requires_block_args():
x = np.arange(10)
assert raises(Exception, lambda: from_array(x))
def test_repr():
d = da.ones((4, 4), chunks=(2, 2))
assert d.name[:5] in repr(d)
assert str(d.shape) in repr(d)
assert str(d._dtype) in repr(d)
d = da.ones((4000, 4), chunks=(4, 2))
assert len(str(d)) < 1000
def test_slicing_with_ellipsis():
x = np.arange(256).reshape((4, 4, 4, 4))
d = da.from_array(x, chunks=((2, 2, 2, 2)))
assert eq(d[..., 1], x[..., 1])
assert eq(d[0, ..., 1], x[0, ..., 1])
def test_slicing_with_ndarray():
x = np.arange(64).reshape((8, 8))
d = da.from_array(x, chunks=((4, 4)))
assert eq(d[np.arange(8)], x)
assert eq(d[np.ones(8, dtype=bool)], x)
def test_dtype():
d = da.ones((4, 4), chunks=(2, 2))
assert d.dtype == d.compute().dtype
assert (d * 1.0).dtype == (d + 1.0).compute().dtype
assert d.sum().dtype == d.sum().compute().dtype # no shape
def test_blockdims_from_blockshape():
assert blockdims_from_blockshape((10, 10), (4, 3)) == ((4, 4, 2), (3, 3, 3, 1))
assert raises(TypeError, lambda: blockdims_from_blockshape((10,), None))
assert blockdims_from_blockshape((1e2, 3), [1e1, 3]) == ((10,)*10, (3,))
assert blockdims_from_blockshape((np.int8(10),), (5,)) == ((5, 5),)
def test_coerce():
d = da.from_array(np.array([1]), chunks=(1,))
with dask.set_options(get=dask.get):
assert bool(d)
assert int(d)
assert float(d)
assert complex(d)
def test_store():
d = da.ones((4, 4), chunks=(2, 2))
a, b = d + 1, d + 2
at = np.empty(shape=(4, 4))
bt = np.empty(shape=(4, 4))
store([a, b], [at, bt])
assert (at == 2).all()
assert (bt == 3).all()
assert raises(ValueError, lambda: store([a], [at, bt]))
assert raises(ValueError, lambda: store(at, at))
assert raises(ValueError, lambda: store([at, bt], [at, bt]))
def test_to_hdf5():
try:
import h5py
except ImportError:
return
x = da.ones((4, 4), chunks=(2, 2))
y = da.ones(4, chunks=2, dtype='i4')
with tmpfile('.hdf5') as fn:
x.to_hdf5(fn, '/x')
with h5py.File(fn) as f:
d = f['/x']
assert eq(d[:], x)
assert d.chunks == (2, 2)
with tmpfile('.hdf5') as fn:
x.to_hdf5(fn, '/x', chunks=None)
with h5py.File(fn) as f:
d = f['/x']
assert eq(d[:], x)
assert d.chunks is None
with tmpfile('.hdf5') as fn:
x.to_hdf5(fn, '/x', chunks=(1, 1))
with h5py.File(fn) as f:
d = f['/x']
assert eq(d[:], x)
assert d.chunks == (1, 1)
with tmpfile('.hdf5') as fn:
da.to_hdf5(fn, {'/x': x, '/y': y})
with h5py.File(fn) as f:
assert eq(f['/x'][:], x)
assert f['/x'].chunks == (2, 2)
assert eq(f['/y'][:], y)
assert f['/y'].chunks == (2,)
def test_np_array_with_zero_dimensions():
d = da.ones((4, 4), chunks=(2, 2))
assert eq(np.array(d.sum()), np.array(d.compute().sum()))
def test_unique():
x = np.array([1, 2, 4, 4, 5, 2])
d = da.from_array(x, chunks=(3,))
assert eq(da.unique(d), np.unique(x))
def test_dtype_complex():
x = np.arange(24).reshape((4, 6)).astype('f4')
y = np.arange(24).reshape((4, 6)).astype('i8')
z = np.arange(24).reshape((4, 6)).astype('i2')
a = da.from_array(x, chunks=(2, 3))
b = da.from_array(y, chunks=(2, 3))
c = da.from_array(z, chunks=(2, 3))
def eq(a, b):
return (isinstance(a, np.dtype) and
isinstance(b, np.dtype) and
str(a) == str(b))
assert eq(a._dtype, x.dtype)
assert eq(b._dtype, y.dtype)
assert eq((a + 1)._dtype, (x + 1).dtype)
assert eq((a + b)._dtype, (x + y).dtype)
assert eq(a.T._dtype, x.T.dtype)
assert eq(a[:3]._dtype, x[:3].dtype)
assert eq((a.dot(b.T))._dtype, (x.dot(y.T)).dtype)
assert eq(stack([a, b])._dtype, np.vstack([x, y]).dtype)
assert eq(concatenate([a, b])._dtype, np.concatenate([x, y]).dtype)
assert eq(b.std()._dtype, y.std().dtype)
assert eq(c.sum()._dtype, z.sum().dtype)
assert eq(a.min()._dtype, a.min().dtype)
assert eq(b.std()._dtype, b.std().dtype)
assert eq(a.argmin(axis=0)._dtype, a.argmin(axis=0).dtype)
assert eq(da.sin(c)._dtype, np.sin(z).dtype)
assert eq(da.exp(b)._dtype, np.exp(y).dtype)
assert eq(da.floor(a)._dtype, np.floor(x).dtype)
assert eq(da.isnan(b)._dtype, np.isnan(y).dtype)
with ignoring(ImportError):
assert da.isnull(b)._dtype == 'bool'
assert da.notnull(b)._dtype == 'bool'
x = np.array([('a', 1)], dtype=[('text', 'S1'), ('numbers', 'i4')])
d = da.from_array(x, chunks=(1,))
assert eq(d['text']._dtype, x['text'].dtype)
assert eq(d[['numbers', 'text']]._dtype, x[['numbers', 'text']].dtype)
def test_astype():
x = np.ones(5, dtype='f4')
d = da.from_array(x, chunks=(2,))
assert d.astype('i8')._dtype == 'i8'
assert eq(d.astype('i8'), x.astype('i8'))
assert same_keys(d.astype('i8'), d.astype('i8'))
def test_arithmetic():
x = np.arange(5).astype('f4') + 2
y = np.arange(5).astype('i8') + 2
z = np.arange(5).astype('i4') + 2
a = da.from_array(x, chunks=(2,))
b = da.from_array(y, chunks=(2,))
c = da.from_array(z, chunks=(2,))
assert eq(a + b, x + y)
assert eq(a * b, x * y)
assert eq(a - b, x - y)
assert eq(a / b, x / y)
assert eq(b & b, y & y)
assert eq(b | b, y | y)
assert eq(b ^ b, y ^ y)
assert eq(a // b, x // y)
assert eq(a ** b, x ** y)
assert eq(a % b, x % y)
assert eq(a > b, x > y)
assert eq(a < b, x < y)
assert eq(a >= b, x >= y)
assert eq(a <= b, x <= y)
assert eq(a == b, x == y)
assert eq(a != b, x != y)
assert eq(a + 2, x + 2)
assert eq(a * 2, x * 2)
assert eq(a - 2, x - 2)
assert eq(a / 2, x / 2)
assert eq(b & True, y & True)
assert eq(b | True, y | True)
assert eq(b ^ True, y ^ True)
assert eq(a // 2, x // 2)
assert eq(a ** 2, x ** 2)
assert eq(a % 2, x % 2)
assert eq(a > 2, x > 2)
assert eq(a < 2, x < 2)
assert eq(a >= 2, x >= 2)
assert eq(a <= 2, x <= 2)
assert eq(a == 2, x == 2)
assert eq(a != 2, x != 2)
assert eq(2 + b, 2 + y)
assert eq(2 * b, 2 * y)
assert eq(2 - b, 2 - y)
assert eq(2 / b, 2 / y)
assert eq(True & b, True & y)
assert eq(True | b, True | y)
assert eq(True ^ b, True ^ y)
assert eq(2 // b, 2 // y)
assert eq(2 ** b, 2 ** y)
assert eq(2 % b, 2 % y)
assert eq(2 > b, 2 > y)
assert eq(2 < b, 2 < y)
assert eq(2 >= b, 2 >= y)
assert eq(2 <= b, 2 <= y)
assert eq(2 == b, 2 == y)
assert eq(2 != b, 2 != y)
assert eq(-a, -x)
assert eq(abs(a), abs(x))
assert eq(~(a == b), ~(x == y))
assert eq(~(a == b), ~(x == y))
assert eq(da.logaddexp(a, b), np.logaddexp(x, y))
assert eq(da.logaddexp2(a, b), np.logaddexp2(x, y))
assert eq(da.exp(b), np.exp(y))
assert eq(da.log(a), np.log(x))
assert eq(da.log10(a), np.log10(x))
assert eq(da.log1p(a), np.log1p(x))
assert eq(da.expm1(b), np.expm1(y))
assert eq(da.sqrt(a), np.sqrt(x))
assert eq(da.square(a), np.square(x))
assert eq(da.sin(a), np.sin(x))
assert eq(da.cos(b), np.cos(y))
assert eq(da.tan(a), np.tan(x))
assert eq(da.arcsin(b/10), np.arcsin(y/10))
assert eq(da.arccos(b/10), np.arccos(y/10))
assert eq(da.arctan(b/10), np.arctan(y/10))
assert eq(da.arctan2(b*10, a), np.arctan2(y*10, x))
assert eq(da.hypot(b, a), np.hypot(y, x))
assert eq(da.sinh(a), np.sinh(x))
assert eq(da.cosh(b), np.cosh(y))
assert eq(da.tanh(a), np.tanh(x))
assert eq(da.arcsinh(b*10), np.arcsinh(y*10))
assert eq(da.arccosh(b*10), np.arccosh(y*10))
assert eq(da.arctanh(b/10), np.arctanh(y/10))
assert eq(da.deg2rad(a), np.deg2rad(x))
assert eq(da.rad2deg(a), np.rad2deg(x))
assert eq(da.logical_and(a < 1, b < 4), np.logical_and(x < 1, y < 4))
assert eq(da.logical_or(a < 1, b < 4), np.logical_or(x < 1, y < 4))
assert eq(da.logical_xor(a < 1, b < 4), np.logical_xor(x < 1, y < 4))
assert eq(da.logical_not(a < 1), np.logical_not(x < 1))
    assert eq(da.maximum(a, 5 - a), np.maximum(x, 5 - x))
    assert eq(da.minimum(a, 5 - a), np.minimum(x, 5 - x))
    assert eq(da.fmax(a, 5 - a), np.fmax(x, 5 - x))
    assert eq(da.fmin(a, 5 - a), np.fmin(x, 5 - x))
assert eq(da.isreal(a + 1j * b), np.isreal(x + 1j * y))
assert eq(da.iscomplex(a + 1j * b), np.iscomplex(x + 1j * y))
assert eq(da.isfinite(a), np.isfinite(x))
assert eq(da.isinf(a), np.isinf(x))
assert eq(da.isnan(a), np.isnan(x))
assert eq(da.signbit(a - 3), np.signbit(x - 3))
assert eq(da.copysign(a - 3, b), np.copysign(x - 3, y))
assert eq(da.nextafter(a - 3, b), np.nextafter(x - 3, y))
assert eq(da.ldexp(c, c), np.ldexp(z, z))
assert eq(da.fmod(a * 12, b), np.fmod(x * 12, y))
assert eq(da.floor(a * 0.5), np.floor(x * 0.5))
assert eq(da.ceil(a), np.ceil(x))
assert eq(da.trunc(a / 2), np.trunc(x / 2))
assert eq(da.degrees(b), np.degrees(y))
assert eq(da.radians(a), np.radians(x))
assert eq(da.rint(a + 0.3), np.rint(x + 0.3))
assert eq(da.fix(a - 2.5), np.fix(x - 2.5))
assert eq(da.angle(a + 1j), np.angle(x + 1j))
assert eq(da.real(a + 1j), np.real(x + 1j))
assert eq((a + 1j).real, np.real(x + 1j))
assert eq(da.imag(a + 1j), np.imag(x + 1j))
assert eq((a + 1j).imag, np.imag(x + 1j))
assert eq(da.conj(a + 1j * b), np.conj(x + 1j * y))
assert eq((a + 1j * b).conj(), (x + 1j * y).conj())
assert eq(da.clip(b, 1, 4), np.clip(y, 1, 4))
assert eq(da.fabs(b), np.fabs(y))
assert eq(da.sign(b - 2), np.sign(y - 2))
l1, l2 = da.frexp(a)
r1, r2 = np.frexp(x)
assert eq(l1, r1)
assert eq(l2, r2)
l1, l2 = da.modf(a)
r1, r2 = np.modf(x)
assert eq(l1, r1)
assert eq(l2, r2)
assert eq(da.around(a, -1), np.around(x, -1))
def test_elemwise_consistent_names():
a = da.from_array(np.arange(5, dtype='f4'), chunks=(2,))
b = da.from_array(np.arange(5, dtype='f4'), chunks=(2,))
assert same_keys(a + b, a + b)
assert same_keys(a + 2, a + 2)
assert same_keys(da.exp(a), da.exp(a))
assert same_keys(da.exp(a, dtype='f8'), da.exp(a, dtype='f8'))
assert same_keys(da.maximum(a, b), da.maximum(a, b))
def test_optimize():
x = np.arange(5).astype('f4')
a = da.from_array(x, chunks=(2,))
expr = a[1:4] + 1
result = optimize(expr.dask, expr._keys())
assert isinstance(result, dict)
assert all(key in result for key in expr._keys())
def test_slicing_with_non_ndarrays():
class ARangeSlice(object):
def __init__(self, start, stop):
self.start = start
self.stop = stop
def __array__(self):
return np.arange(self.start, self.stop)
class ARangeSlicable(object):
dtype = 'i8'
def __init__(self, n):
self.n = n
@property
def shape(self):
return (self.n,)
def __getitem__(self, key):
return ARangeSlice(key[0].start, key[0].stop)
x = da.from_array(ARangeSlicable(10), chunks=(4,))
assert eq((x + 1).sum(), (np.arange(10, dtype=x.dtype) + 1).sum())
def test_getarray():
assert type(getarray(np.matrix([[1]]), 0)) == np.ndarray
assert eq(getarray([1, 2, 3, 4, 5], slice(1, 4)), np.array([2, 3, 4]))
assert eq(getarray(np.arange(5), (None, slice(None, None))),
np.arange(5)[None, :])
def test_squeeze():
x = da.ones((10, 1), chunks=(3, 1))
assert eq(x.squeeze(), x.compute().squeeze())
assert x.squeeze().chunks == ((3, 3, 3, 1),)
assert same_keys(x.squeeze(), x.squeeze())
def test_size():
x = da.ones((10, 2), chunks=(3, 1))
assert x.size == np.array(x).size
def test_nbytes():
x = da.ones((10, 2), chunks=(3, 1))
assert x.nbytes == np.array(x).nbytes
def test_Array_normalizes_dtype():
x = da.ones((3,), chunks=(1,), dtype=int)
assert isinstance(x.dtype, np.dtype)
def test_args():
x = da.ones((10, 2), chunks=(3, 1), dtype='i4') + 1
y = Array(*x._args)
assert eq(x, y)
def test_from_array_with_lock():
x = np.arange(10)
d = da.from_array(x, chunks=5, lock=True)
tasks = [v for k, v in d.dask.items() if k[0] == d.name]
assert isinstance(tasks[0][3], type(Lock()))
assert len(set(task[3] for task in tasks)) == 1
assert eq(d, x)
lock = Lock()
e = da.from_array(x, chunks=5, lock=lock)
f = da.from_array(x, chunks=5, lock=lock)
assert eq(e + f, x + x)
def test_from_func():
x = np.arange(10)
f = lambda n: n * x
d = from_func(f, (10,), x.dtype, kwargs={'n': 2})
assert d.shape == x.shape
assert d.dtype == x.dtype
assert eq(d.compute(), 2 * x)
assert same_keys(d, from_func(f, (10,), x.dtype, kwargs={'n': 2}))
def test_topk():
x = np.array([5, 2, 1, 6])
d = da.from_array(x, chunks=2)
e = da.topk(2, d)
assert e.chunks == ((2,),)
assert eq(e, np.sort(x)[-1:-3:-1])
assert same_keys(da.topk(2, d), e)
def test_topk_k_bigger_than_chunk():
x = np.array([5, 2, 1, 6])
d = da.from_array(x, chunks=2)
e = da.topk(3, d)
assert e.chunks == ((3,),)
assert eq(e, np.array([6, 5, 2]))
def test_bincount():
x = np.array([2, 1, 5, 2, 1])
d = da.from_array(x, chunks=2)
e = da.bincount(d, minlength=6)
assert eq(e, np.bincount(x, minlength=6))
assert same_keys(da.bincount(d, minlength=6), e)
def test_bincount_with_weights():
x = np.array([2, 1, 5, 2, 1])
d = da.from_array(x, chunks=2)
weights = np.array([1, 2, 1, 0.5, 1])
dweights = da.from_array(weights, chunks=2)
e = da.bincount(d, weights=dweights, minlength=6)
    assert eq(e, np.bincount(x, weights=weights, minlength=6))
assert same_keys(da.bincount(d, weights=dweights, minlength=6), e)
def test_bincount_raises_informative_error_on_missing_minlength_kwarg():
x = np.array([2, 1, 5, 2, 1])
d = da.from_array(x, chunks=2)
try:
da.bincount(d)
except Exception as e:
assert 'minlength' in str(e)
else:
assert False
def test_histogram():
# Test for normal, flattened input
n = 100
v = da.random.random(n, chunks=10)
bins = np.arange(0, 1.01, 0.01)
(a1, b1) = da.histogram(v, bins=bins)
(a2, b2) = np.histogram(v, bins=bins)
# Check if the sum of the bins equals the number of samples
assert a2.sum(axis=0) == n
assert a1.sum(axis=0) == n
assert eq(a1, a2)
assert same_keys(da.histogram(v, bins=bins)[0], a1)
def test_histogram_alternative_bins_range():
v = da.random.random(100, chunks=10)
bins = np.arange(0, 1.01, 0.01)
# Other input
(a1, b1) = da.histogram(v, bins=10, range=(0, 1))
(a2, b2) = np.histogram(v, bins=10, range=(0, 1))
assert eq(a1, a2)
assert eq(b1, b2)
def test_histogram_return_type():
v = da.random.random(100, chunks=10)
bins = np.arange(0, 1.01, 0.01)
# Check if return type is same as hist
bins = np.arange(0, 11, 1, dtype='i4')
assert eq(da.histogram(v * 10, bins=bins)[0],
np.histogram(v * 10, bins=bins)[0])
def test_histogram_extra_args_and_shapes():
# Check for extra args and shapes
bins = np.arange(0, 1.01, 0.01)
v = da.random.random(100, chunks=10)
data = [(v, bins, da.ones(100, chunks=v.chunks) * 5),
(da.random.random((50, 50), chunks=10), bins, da.ones((50, 50), chunks=10) * 5)]
for v, bins, w in data:
        # normed
        assert eq(da.histogram(v, bins=bins, normed=True)[0],
                  np.histogram(v, bins=bins, normed=True)[0])
        # density
        assert eq(da.histogram(v, bins=bins, density=True)[0],
                  np.histogram(v, bins=bins, density=True)[0])
        # weights
        assert eq(da.histogram(v, bins=bins, weights=w)[0],
                  np.histogram(v, bins=bins, weights=w)[0])
        assert eq(da.histogram(v, bins=bins, weights=w, density=True)[0],
                  np.histogram(v, bins=bins, weights=w, density=True)[0])
def test_concatenate3():
x = np.array([1, 2])
assert eq(concatenate3([x, x, x]),
np.array([1, 2, 1, 2, 1, 2]))
x = np.array([[1, 2]])
assert (concatenate3([[x, x, x], [x, x, x]]) ==
np.array([[1, 2, 1, 2, 1, 2],
[1, 2, 1, 2, 1, 2]])).all()
assert (concatenate3([[x, x], [x, x], [x, x]]) ==
np.array([[1, 2, 1, 2],
[1, 2, 1, 2],
[1, 2, 1, 2]])).all()
x = np.arange(12).reshape((2, 2, 3))
assert eq(concatenate3([[[x, x, x],
[x, x, x]],
[[x, x, x],
[x, x, x]]]),
np.array([[[ 0, 1, 2, 0, 1, 2, 0, 1, 2],
[ 3, 4, 5, 3, 4, 5, 3, 4, 5],
[ 0, 1, 2, 0, 1, 2, 0, 1, 2],
[ 3, 4, 5, 3, 4, 5, 3, 4, 5]],
[[ 6, 7, 8, 6, 7, 8, 6, 7, 8],
[ 9, 10, 11, 9, 10, 11, 9, 10, 11],
[ 6, 7, 8, 6, 7, 8, 6, 7, 8],
[ 9, 10, 11, 9, 10, 11, 9, 10, 11]],
[[ 0, 1, 2, 0, 1, 2, 0, 1, 2],
[ 3, 4, 5, 3, 4, 5, 3, 4, 5],
[ 0, 1, 2, 0, 1, 2, 0, 1, 2],
[ 3, 4, 5, 3, 4, 5, 3, 4, 5]],
[[ 6, 7, 8, 6, 7, 8, 6, 7, 8],
[ 9, 10, 11, 9, 10, 11, 9, 10, 11],
[ 6, 7, 8, 6, 7, 8, 6, 7, 8],
[ 9, 10, 11, 9, 10, 11, 9, 10, 11]]]))
def test_map_blocks3():
x = np.arange(10)
y = np.arange(10) * 2
d = da.from_array(x, chunks=5)
e = da.from_array(y, chunks=5)
assert eq(da.core.map_blocks(lambda a, b: a+2*b, d, e, dtype=d.dtype),
x + 2*y)
z = np.arange(100).reshape((10, 10))
f = da.from_array(z, chunks=5)
func = lambda a, b: a + 2*b
res = da.core.map_blocks(func, d, f, dtype=d.dtype)
assert eq(res, x + 2*z)
assert same_keys(da.core.map_blocks(func, d, f, dtype=d.dtype), res)
def test_from_array_with_missing_chunks():
x = np.random.randn(2, 4, 3)
d = da.from_array(x, chunks=(None, 2, None))
assert d.chunks == da.from_array(x, chunks=(2, 2, 3)).chunks
def test_cache():
x = da.arange(15, chunks=5)
y = 2 * x + 1
z = y.cache()
assert len(z.dask) == 3 # very short graph
assert eq(y, z)
cache = np.empty(15, dtype=y.dtype)
z = y.cache(store=cache)
assert len(z.dask) < 6 # very short graph
assert z.chunks == y.chunks
assert eq(y, z)
def test_take_dask_from_numpy():
x = np.arange(5).astype('f8')
    y = da.from_array(np.array([1, 2, 3, 3, 2, 1]), chunks=3)
z = da.take(x * 2, y)
assert z.chunks == y.chunks
assert eq(z, np.array([2., 4., 6., 6., 4., 2.]))
def test_normalize_chunks():
assert normalize_chunks(3, (4, 6)) == ((3, 1), (3, 3))
def test_raise_on_no_chunks():
x = da.ones(6, chunks=3)
try:
Array(x.dask, x.name, chunks=None, dtype=x.dtype, shape=None)
assert False
except ValueError as e:
assert "dask.pydata.org" in str(e)
assert raises(ValueError, lambda: da.ones(6))
def test_chunks_is_immutable():
x = da.ones(6, chunks=3)
try:
x.chunks = 2
assert False
except TypeError as e:
assert 'rechunk(2)' in str(e)
def test_raise_on_bad_kwargs():
x = da.ones(5, chunks=3)
try:
da.minimum(x, out=None)
except TypeError as e:
assert 'minimum' in str(e)
assert 'out' in str(e)
def test_long_slice():
x = np.arange(10000)
d = da.from_array(x, chunks=1)
assert eq(d[8000:8200], x[8000:8200])
def test_h5py_newaxis():
try:
import h5py
except ImportError:
return
with tmpfile('h5') as fn:
with h5py.File(fn) as f:
x = f.create_dataset('/x', shape=(10, 10), dtype='f8')
d = da.from_array(x, chunks=(5, 5))
assert d[None, :, :].compute(get=get_sync).shape == (1, 10, 10)
assert d[:, None, :].compute(get=get_sync).shape == (10, 1, 10)
assert d[:, :, None].compute(get=get_sync).shape == (10, 10, 1)
assert same_keys(d[:, :, None], d[:, :, None])
def test_ellipsis_slicing():
assert eq(da.ones(4, chunks=2)[...], np.ones(4))
def test_point_slicing():
x = np.arange(56).reshape((7, 8))
d = da.from_array(x, chunks=(3, 4))
result = d.vindex[[1, 2, 5, 5], [3, 1, 6, 1]]
assert eq(result, x[[1, 2, 5, 5], [3, 1, 6, 1]])
result = d.vindex[[0, 1, 6, 0], [0, 1, 0, 7]]
assert eq(result, x[[0, 1, 6, 0], [0, 1, 0, 7]])
assert same_keys(result, d.vindex[[0, 1, 6, 0], [0, 1, 0, 7]])
def test_point_slicing_with_full_slice():
from dask.array.core import _vindex_transpose, _get_axis
x = np.arange(4*5*6*7).reshape((4, 5, 6, 7))
d = da.from_array(x, chunks=(2, 3, 3, 4))
inds = [
[[1, 2, 3], None, [3, 2, 1], [5, 3, 4]],
[[1, 2, 3], None, [4, 3, 2], None],
[[1, 2, 3], [3, 2, 1]],
[[1, 2, 3], [3, 2, 1], [3, 2, 1], [5, 3, 4]],
[[], [], [], None],
[np.array([1, 2, 3]), None, np.array([4, 3, 2]), None],
[None, None, [1, 2, 3], [4, 3, 2]],
[None, [0, 2, 3], None, [0, 3, 2]],
]
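    # None entries stand in for full slices; _get_axis finds where the fancy axes sit and
    # _vindex_transpose rotates the numpy result so the points axis comes first, matching
    # the layout vindex produces.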
for ind in inds:
slc = [i if isinstance(i, (np.ndarray, list)) else slice(None, None)
for i in ind]
result = d.vindex[tuple(slc)]
# Rotate the expected result accordingly
axis = _get_axis(ind)
expected = _vindex_transpose(x[tuple(slc)], axis)
assert eq(result, expected)
# Always have the first axis be the length of the points
k = len(next(i for i in ind if isinstance(i, (np.ndarray, list))))
assert result.shape[0] == k
def test_vindex_errors():
d = da.ones((5, 5, 5), chunks=(3, 3, 3))
assert raises(IndexError, lambda: d.vindex[0])
assert raises(IndexError, lambda: d.vindex[[1, 2, 3]])
assert raises(IndexError, lambda: d.vindex[[1, 2, 3], [1, 2, 3], 0])
assert raises(IndexError, lambda: d.vindex[[1], [1, 2, 3]])
assert raises(IndexError, lambda: d.vindex[[1, 2, 3], [[1], [2], [3]]])
def test_vindex_merge():
from dask.array.core import _vindex_merge
locations = [1], [2, 0]
values = [np.array([[1, 2, 3]]),
np.array([[10, 20, 30], [40, 50, 60]])]
assert (_vindex_merge(locations, values) == np.array([[40, 50, 60],
[1, 2, 3],
[10, 20, 30]])).all()
def test_empty_array():
assert eq(np.arange(0), da.arange(0, chunks=5))
def test_array():
x = np.ones(5, dtype='i4')
d = da.ones(5, chunks=3, dtype='i4')
assert eq(da.array(d, ndmin=3, dtype='i8'),
np.array(x, ndmin=3, dtype='i8'))
def test_cov():
x = np.arange(56).reshape((7, 8))
d = da.from_array(x, chunks=(4, 4))
assert eq(da.cov(d), np.cov(x))
assert eq(da.cov(d, rowvar=0), np.cov(x, rowvar=0))
assert eq(da.cov(d, ddof=10), np.cov(x, ddof=10))
assert eq(da.cov(d, bias=1), np.cov(x, bias=1))
assert eq(da.cov(d, d), np.cov(x, x))
y = np.arange(8)
e = da.from_array(y, chunks=(4,))
assert eq(da.cov(d, e), np.cov(x, y))
assert eq(da.cov(e, d), np.cov(y, x))
assert raises(ValueError, lambda: da.cov(d, ddof=1.5))
def test_memmap():
with tmpfile('npy') as fn_1:
with tmpfile('npy') as fn_2:
try:
x = da.arange(100, chunks=15)
target = np.memmap(fn_1, shape=x.shape, mode='w+', dtype=x.dtype)
x.store(target)
assert eq(target, x)
np.save(fn_2, target)
assert eq(np.load(fn_2, mmap_mode='r'), x)
finally:
target._mmap.close()
def test_to_npy_stack():
x = np.arange(5*10*10).reshape((5, 10, 10))
d = da.from_array(x, chunks=(2, 4, 4))
dirname = mkdtemp()
try:
da.to_npy_stack(dirname, d, axis=0)
assert os.path.exists(os.path.join(dirname, '0.npy'))
assert (np.load(os.path.join(dirname, '1.npy')) == x[2:4]).all()
e = da.from_npy_stack(dirname)
assert eq(d, e)
finally:
shutil.rmtree(dirname)
| {
"repo_name": "pombredanne/dask",
"path": "dask/array/tests/test_array_core.py",
"copies": "1",
"size": "47817",
"license": "bsd-3-clause",
"hash": -6772545908611036000,
"line_mean": 29.6519230769,
"line_max": 92,
"alpha_frac": 0.5044858523,
"autogenerated": false,
"ratio": 2.698780900778869,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3703266753078869,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import pytest
pytest.importorskip('numpy')
from operator import add
from toolz import merge
from toolz.curried import identity
import dask
import dask.array as da
from dask.array.core import *
from dask.utils import raises, ignoring, tmpfile
inc = lambda x: x + 1
def test_getem():
assert getem('X', (2, 3), shape=(4, 6)) == \
{('X', 0, 0): (getarray, 'X', (slice(0, 2), slice(0, 3))),
('X', 1, 0): (getarray, 'X', (slice(2, 4), slice(0, 3))),
('X', 1, 1): (getarray, 'X', (slice(2, 4), slice(3, 6))),
('X', 0, 1): (getarray, 'X', (slice(0, 2), slice(3, 6)))}
def test_top():
assert top(inc, 'z', 'ij', 'x', 'ij', numblocks={'x': (2, 2)}) == \
{('z', 0, 0): (inc, ('x', 0, 0)),
('z', 0, 1): (inc, ('x', 0, 1)),
('z', 1, 0): (inc, ('x', 1, 0)),
('z', 1, 1): (inc, ('x', 1, 1))}
assert top(add, 'z', 'ij', 'x', 'ij', 'y', 'ij',
numblocks={'x': (2, 2), 'y': (2, 2)}) == \
{('z', 0, 0): (add, ('x', 0, 0), ('y', 0, 0)),
('z', 0, 1): (add, ('x', 0, 1), ('y', 0, 1)),
('z', 1, 0): (add, ('x', 1, 0), ('y', 1, 0)),
('z', 1, 1): (add, ('x', 1, 1), ('y', 1, 1))}
assert top(dotmany, 'z', 'ik', 'x', 'ij', 'y', 'jk',
numblocks={'x': (2, 2), 'y': (2, 2)}) == \
{('z', 0, 0): (dotmany, [('x', 0, 0), ('x', 0, 1)],
[('y', 0, 0), ('y', 1, 0)]),
('z', 0, 1): (dotmany, [('x', 0, 0), ('x', 0, 1)],
[('y', 0, 1), ('y', 1, 1)]),
('z', 1, 0): (dotmany, [('x', 1, 0), ('x', 1, 1)],
[('y', 0, 0), ('y', 1, 0)]),
('z', 1, 1): (dotmany, [('x', 1, 0), ('x', 1, 1)],
[('y', 0, 1), ('y', 1, 1)])}
assert top(identity, 'z', '', 'x', 'ij', numblocks={'x': (2, 2)}) ==\
{('z',): (identity, [[('x', 0, 0), ('x', 0, 1)],
[('x', 1, 0), ('x', 1, 1)]])}
def test_top_supports_broadcasting_rules():
assert top(add, 'z', 'ij', 'x', 'ij', 'y', 'ij',
numblocks={'x': (1, 2), 'y': (2, 1)}) == \
{('z', 0, 0): (add, ('x', 0, 0), ('y', 0, 0)),
('z', 0, 1): (add, ('x', 0, 1), ('y', 0, 0)),
('z', 1, 0): (add, ('x', 0, 0), ('y', 1, 0)),
('z', 1, 1): (add, ('x', 0, 1), ('y', 1, 0))}
def test_concatenate3():
x = np.array([1, 2])
assert concatenate3([[x, x, x],
[x, x, x]]).shape == (2, 6)
x = np.array([[1, 2]])
assert concatenate3([[x, x, x],
[x, x, x]]).shape == (2, 6)
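# Test helper: compare a dask result against a NumPy result. The dtype strings must
# match; values are compared with allclose, falling back to elementwise equality.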
def eq(a, b):
if isinstance(a, Array):
adt = a._dtype
a = a.compute(get=dask.get)
else:
adt = getattr(a, 'dtype', None)
if isinstance(b, Array):
bdt = b._dtype
b = b.compute(get=dask.get)
else:
bdt = getattr(b, 'dtype', None)
if not str(adt) == str(bdt):
return False
try:
return np.allclose(a, b)
except TypeError:
pass
c = a == b
if isinstance(c, np.ndarray):
return c.all()
else:
return c
def test_chunked_dot_product():
x = np.arange(400).reshape((20, 20))
o = np.ones((20, 20))
d = {'x': x, 'o': o}
getx = getem('x', (5, 5), shape=(20, 20))
geto = getem('o', (5, 5), shape=(20, 20))
result = top(dotmany, 'out', 'ik', 'x', 'ij', 'o', 'jk',
numblocks={'x': (4, 4), 'o': (4, 4)})
dsk = merge(d, getx, geto, result)
out = dask.get(dsk, [[('out', i, j) for j in range(4)] for i in range(4)])
assert eq(np.dot(x, o), concatenate3(out))
def test_chunked_transpose_plus_one():
x = np.arange(400).reshape((20, 20))
d = {'x': x}
getx = getem('x', (5, 5), shape=(20, 20))
f = lambda x: x.T + 1
comp = top(f, 'out', 'ij', 'x', 'ji', numblocks={'x': (4, 4)})
dsk = merge(d, getx, comp)
out = dask.get(dsk, [[('out', i, j) for j in range(4)] for i in range(4)])
assert eq(concatenate3(out), x.T + 1)
def test_transpose():
x = np.arange(240).reshape((4, 6, 10))
d = da.from_array(x, (2, 3, 4))
assert eq(d.transpose((2, 0, 1)),
x.transpose((2, 0, 1)))
def test_broadcast_dimensions_works_with_singleton_dimensions():
argpairs = [('x', 'i')]
numblocks = {'x': ((1,),)}
assert broadcast_dimensions(argpairs, numblocks) == {'i': (1,)}
def test_broadcast_dimensions():
argpairs = [('x', 'ij'), ('y', 'ij')]
d = {'x': ('Hello', 1), 'y': (1, (2, 3))}
assert broadcast_dimensions(argpairs, d) == {'i': 'Hello', 'j': (2, 3)}
def test_Array():
shape = (1000, 1000)
chunks = (100, 100)
name = 'x'
dsk = merge({name: 'some-array'}, getem(name, chunks, shape=shape))
a = Array(dsk, name, chunks, shape=shape)
assert a.numblocks == (10, 10)
assert a._keys() == [[('x', i, j) for j in range(10)]
for i in range(10)]
assert a.chunks == ((100,) * 10, (100,) * 10)
assert a.shape == shape
assert len(a) == shape[0]
def test_uneven_chunks():
a = Array({}, 'x', chunks=(3, 3), shape=(10, 10))
assert a.chunks == ((3, 3, 3, 1), (3, 3, 3, 1))
def test_numblocks_supports_singleton_block_dims():
shape = (100, 10)
chunks = (10, 10)
name = 'x'
dsk = merge({name: 'some-array'}, getem(name, shape=shape, chunks=chunks))
a = Array(dsk, name, chunks, shape=shape)
assert set(concat(a._keys())) == set([('x', i, 0) for i in range(100//10)])
def test_keys():
dsk = dict((('x', i, j), ()) for i in range(5) for j in range(6))
dx = Array(dsk, 'x', chunks=(10, 10), shape=(50, 60))
assert dx._keys() == [[(dx.name, i, j) for j in range(6)]
for i in range(5)]
d = Array({}, 'x', (), shape=())
assert d._keys() == [('x',)]
def test_Array_computation():
a = Array({('x', 0, 0): np.eye(3)}, 'x', shape=(3, 3), chunks=(3, 3))
assert eq(np.array(a), np.eye(3))
assert isinstance(a.compute(), np.ndarray)
assert float(a[0, 0]) == 1
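# stack adds a new axis; each output block is a getarray of the matching input block
# with a None (newaxis) slice inserted at the stacked axis.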
def test_stack():
a, b, c = [Array(getem(name, chunks=(2, 3), shape=(4, 6)),
name, shape=(4, 6), chunks=(2, 3))
for name in 'ABC']
s = stack([a, b, c], axis=0)
colon = slice(None, None, None)
assert s.shape == (3, 4, 6)
assert s.chunks == ((1, 1, 1), (2, 2), (3, 3))
assert s.dask[(s.name, 0, 1, 0)] == (getarray, ('A', 1, 0),
(None, colon, colon))
assert s.dask[(s.name, 2, 1, 0)] == (getarray, ('C', 1, 0),
(None, colon, colon))
s2 = stack([a, b, c], axis=1)
assert s2.shape == (4, 3, 6)
assert s2.chunks == ((2, 2), (1, 1, 1), (3, 3))
assert s2.dask[(s2.name, 0, 1, 0)] == (getarray, ('B', 0, 0),
(colon, None, colon))
assert s2.dask[(s2.name, 1, 1, 0)] == (getarray, ('B', 1, 0),
(colon, None, colon))
s2 = stack([a, b, c], axis=2)
assert s2.shape == (4, 6, 3)
assert s2.chunks == ((2, 2), (3, 3), (1, 1, 1))
assert s2.dask[(s2.name, 0, 1, 0)] == (getarray, ('A', 0, 1),
(colon, colon, None))
assert s2.dask[(s2.name, 1, 1, 2)] == (getarray, ('C', 1, 1),
(colon, colon, None))
assert raises(ValueError, lambda: stack([a, b, c], axis=3))
assert set(b.dask.keys()).issubset(s2.dask.keys())
assert stack([a, b, c], axis=-1).chunks == \
stack([a, b, c], axis=2).chunks
def test_short_stack():
x = np.array([1])
d = da.from_array(x, chunks=(1,))
s = da.stack([d])
assert s.shape == (1, 1)
assert get(s.dask, s._keys())[0][0].shape == (1, 1)
def test_concatenate():
a, b, c = [Array(getem(name, chunks=(2, 3), shape=(4, 6)),
name, shape=(4, 6), chunks=(2, 3))
for name in 'ABC']
x = concatenate([a, b, c], axis=0)
assert x.shape == (12, 6)
assert x.chunks == ((2, 2, 2, 2, 2, 2), (3, 3))
assert x.dask[(x.name, 0, 1)] == ('A', 0, 1)
assert x.dask[(x.name, 5, 0)] == ('C', 1, 0)
y = concatenate([a, b, c], axis=1)
assert y.shape == (4, 18)
assert y.chunks == ((2, 2), (3, 3, 3, 3, 3, 3))
assert y.dask[(y.name, 1, 0)] == ('A', 1, 0)
assert y.dask[(y.name, 1, 5)] == ('C', 1, 1)
assert set(b.dask.keys()).issubset(y.dask.keys())
assert concatenate([a, b, c], axis=-1).chunks == \
concatenate([a, b, c], axis=1).chunks
assert raises(ValueError, lambda: concatenate([a, b, c], axis=2))
def test_take():
x = np.arange(400).reshape((20, 20))
a = from_array(x, chunks=(5, 5))
assert eq(np.take(x, 3, axis=0), take(a, 3, axis=0))
assert eq(np.take(x, [3, 4, 5], axis=-1), take(a, [3, 4, 5], axis=-1))
assert raises(ValueError, lambda: take(a, 3, axis=2))
def test_binops():
a = Array(dict((('a', i), '') for i in range(3)),
'a', chunks=((10, 10, 10),))
b = Array(dict((('b', i), '') for i in range(3)),
'b', chunks=((10, 10, 10),))
result = elemwise(add, a, b, name='c')
assert result.dask == merge(a.dask, b.dask,
dict((('c', i), (add, ('a', i), ('b', i)))
for i in range(3)))
result = elemwise(pow, a, 2, name='c')
assert result.dask[('c', 0)][1] == ('a', 0)
f = result.dask[('c', 0)][0]
assert f(10) == 100
def test_isnull():
x = np.array([1, np.nan])
a = from_array(x, chunks=(2,))
with ignoring(ImportError):
assert eq(isnull(a), np.isnan(x))
assert eq(notnull(a), ~np.isnan(x))
def test_isclose():
x = np.array([0, np.nan, 1, 1.5])
y = np.array([1e-9, np.nan, 1, 2])
a = from_array(x, chunks=(2,))
b = from_array(y, chunks=(2,))
assert eq(da.isclose(a, b, equal_nan=True),
np.isclose(x, y, equal_nan=True))
def test_elemwise_on_scalars():
x = np.arange(10)
a = from_array(x, chunks=(5,))
assert len(a._keys()) == 2
assert eq(a.sum()**2, x.sum()**2)
x = np.arange(11)
a = from_array(x, chunks=(5,))
assert len(a._keys()) == 3
assert eq(a, x)
def test_operators():
x = np.arange(10)
y = np.arange(10).reshape((10, 1))
a = from_array(x, chunks=(5,))
b = from_array(y, chunks=(5, 1))
c = a + 1
assert eq(c, x + 1)
c = a + b
assert eq(c, x + x.reshape((10, 1)))
expr = (3 / a * b)**2 > 5
assert eq(expr, (3 / x * y)**2 > 5)
c = exp(a)
assert eq(c, np.exp(x))
assert eq(abs(-a), a)
assert eq(a, +x)
def test_field_access():
x = np.array([(1, 1.0), (2, 2.0)], dtype=[('a', 'i4'), ('b', 'f4')])
y = from_array(x, chunks=(1,))
assert eq(y['a'], x['a'])
assert eq(y[['b', 'a']], x[['b', 'a']])
def test_reductions():
x = np.arange(400).reshape((20, 20))
a = from_array(x, chunks=(7, 7))
assert eq(a.sum(), x.sum())
assert eq(a.sum(axis=1), x.sum(axis=1))
assert eq(a.sum(axis=1, keepdims=True), x.sum(axis=1, keepdims=True))
assert eq(a.mean(), x.mean())
assert eq(a.var(axis=(1, 0)), x.var(axis=(1, 0)))
b = a.sum(keepdims=True)
assert b._keys() == [[(b.name, 0, 0)]]
assert eq(a.std(axis=0, keepdims=True), x.std(axis=0, keepdims=True))
def test_tensordot():
x = np.arange(400).reshape((20, 20))
a = from_array(x, chunks=(5, 5))
y = np.arange(200).reshape((20, 10))
b = from_array(y, chunks=(5, 5))
assert eq(tensordot(a, b, axes=1), np.tensordot(x, y, axes=1))
assert eq(tensordot(a, b, axes=(1, 0)), np.tensordot(x, y, axes=(1, 0)))
# assert (tensordot(a, a).chunks
# == tensordot(a, a, axes=((1, 0), (0, 1))).chunks)
# assert eq(tensordot(a, a), np.tensordot(x, x))
def test_dot_method():
x = np.arange(400).reshape((20, 20))
a = from_array(x, chunks=(5, 5))
y = np.arange(200).reshape((20, 10))
b = from_array(y, chunks=(5, 5))
assert eq(a.dot(b), x.dot(y))
def test_T():
x = np.arange(400).reshape((20, 20))
a = from_array(x, chunks=(5, 5))
assert eq(x.T, a.T)
def test_norm():
a = np.arange(200, dtype='f8').reshape((20, 10))
b = from_array(a, chunks=(5, 5))
assert eq(b.vnorm(), np.linalg.norm(a))
assert eq(b.vnorm(ord=1), np.linalg.norm(a.flatten(), ord=1))
assert eq(b.vnorm(ord=4, axis=0), np.linalg.norm(a, ord=4, axis=0))
assert b.vnorm(ord=4, axis=0, keepdims=True).ndim == b.ndim
def test_choose():
x = np.random.randint(10, size=(15, 16))
d = from_array(x, chunks=(4, 5))
assert eq(choose(d > 5, [0, d]), np.choose(x > 5, [0, x]))
assert eq(choose(d > 5, [-d, d]), np.choose(x > 5, [-x, x]))
def test_where():
x = np.random.randint(10, size=(15, 16))
d = from_array(x, chunks=(4, 5))
y = np.random.randint(10, size=15)
e = from_array(y, chunks=(4,))
assert eq(where(d > 5, d, 0), np.where(x > 5, x, 0))
assert eq(where(d > 5, d, -e[:, None]), np.where(x > 5, x, -y[:, None]))
def test_where_has_informative_error():
x = da.ones(5, chunks=3)
try:
result = da.where(x > 0)
except Exception as e:
assert 'dask' in str(e)
def test_coarsen():
x = np.random.randint(10, size=(24, 24))
d = from_array(x, chunks=(4, 8))
assert eq(chunk.coarsen(np.sum, x, {0: 2, 1: 4}),
coarsen(np.sum, d, {0: 2, 1: 4}))
assert eq(chunk.coarsen(np.sum, x, {0: 2, 1: 4}),
coarsen(da.sum, d, {0: 2, 1: 4}))
def test_insert():
x = np.random.randint(10, size=(10, 10))
a = from_array(x, chunks=(5, 5))
y = np.random.randint(10, size=(5, 10))
b = from_array(y, chunks=(4, 4))
assert eq(np.insert(x, 0, -1, axis=0), insert(a, 0, -1, axis=0))
assert eq(np.insert(x, 3, -1, axis=-1), insert(a, 3, -1, axis=-1))
assert eq(np.insert(x, 5, -1, axis=1), insert(a, 5, -1, axis=1))
assert eq(np.insert(x, -1, -1, axis=-2), insert(a, -1, -1, axis=-2))
assert eq(np.insert(x, [2, 3, 3], -1, axis=1),
insert(a, [2, 3, 3], -1, axis=1))
assert eq(np.insert(x, [2, 3, 8, 8, -2, -2], -1, axis=0),
insert(a, [2, 3, 8, 8, -2, -2], -1, axis=0))
assert eq(np.insert(x, slice(1, 4), -1, axis=1),
insert(a, slice(1, 4), -1, axis=1))
assert eq(np.insert(x, [2] * 3 + [5] * 2, y, axis=0),
insert(a, [2] * 3 + [5] * 2, b, axis=0))
assert eq(np.insert(x, 0, y[0], axis=1),
insert(a, 0, b[0], axis=1))
assert raises(NotImplementedError, lambda: insert(a, [4, 2], -1, axis=0))
assert raises(IndexError, lambda: insert(a, [3], -1, axis=2))
assert raises(IndexError, lambda: insert(a, [3], -1, axis=-3))
def test_multi_insert():
z = np.random.randint(10, size=(1, 2))
c = from_array(z, chunks=(1, 2))
assert eq(np.insert(np.insert(z, [0, 1], -1, axis=0), [1], -1, axis=1),
insert(insert(c, [0, 1], -1, axis=0), [1], -1, axis=1))
def test_broadcast_to():
x = np.random.randint(10, size=(5, 1, 6))
a = from_array(x, chunks=(3, 1, 3))
for shape in [(5, 4, 6), (2, 5, 1, 6), (3, 4, 5, 4, 6)]:
assert eq(chunk.broadcast_to(x, shape),
broadcast_to(a, shape))
assert raises(ValueError, lambda: broadcast_to(a, (2, 1, 6)))
assert raises(ValueError, lambda: broadcast_to(a, (3,)))
def test_full():
d = da.full((3, 4), 2, chunks=((2, 1), (2, 2)))
assert d.chunks == ((2, 1), (2, 2))
assert eq(d, np.full((3, 4), 2))
def test_map_blocks():
inc = lambda x: x + 1
x = np.arange(400).reshape((20, 20))
d = from_array(x, chunks=(7, 7))
e = d.map_blocks(inc, dtype=d.dtype)
assert d.chunks == e.chunks
assert eq(e, x + 1)
d = from_array(x, chunks=(10, 10))
e = d.map_blocks(lambda x: x[::2, ::2], chunks=(5, 5), dtype=d.dtype)
assert e.chunks == ((5, 5), (5, 5))
assert eq(e, x[::2, ::2])
d = from_array(x, chunks=(8, 8))
e = d.map_blocks(lambda x: x[::2, ::2], chunks=((4, 4, 2), (4, 4, 2)),
dtype=d.dtype)
assert eq(e, x[::2, ::2])
def test_map_blocks2():
x = np.arange(10, dtype='i8')
d = from_array(x, chunks=(2,))
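    # map_blocks passes the block's index tuple as block_id when the function
    # accepts that keyword argument.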
def func(block, block_id=None):
return np.ones_like(block) * sum(block_id)
d = d.map_blocks(func, dtype='i8')
expected = np.array([0, 0, 1, 1, 2, 2, 3, 3, 4, 4], dtype='i8')
assert eq(d, expected)
def test_fromfunction():
def f(x, y):
return x + y
d = fromfunction(f, shape=(5, 5), chunks=(2, 2), dtype='f8')
assert eq(d, np.fromfunction(f, shape=(5, 5)))
def test_from_function_requires_block_args():
x = np.arange(10)
assert raises(Exception, lambda: from_array(x))
def test_repr():
d = da.ones((4, 4), chunks=(2, 2))
assert d.name in repr(d)
assert str(d.shape) in repr(d)
assert str(d.chunks) in repr(d)
assert str(d._dtype) in repr(d)
d = da.ones((4000, 4), chunks=(4, 2))
assert len(str(d)) < 1000
def test_slicing_with_ellipsis():
x = np.arange(256).reshape((4, 4, 4, 4))
d = da.from_array(x, chunks=((2, 2, 2, 2)))
assert eq(d[..., 1], x[..., 1])
assert eq(d[0, ..., 1], x[0, ..., 1])
def test_slicing_with_ndarray():
x = np.arange(64).reshape((8, 8))
d = da.from_array(x, chunks=((4, 4)))
assert eq(d[np.arange(8)], x)
assert eq(d[np.ones(8, dtype=bool)], x)
def test_dtype():
d = da.ones((4, 4), chunks=(2, 2))
assert d.dtype == d.compute().dtype
assert (d * 1.0).dtype == (d + 1.0).compute().dtype
assert d.sum().dtype == d.sum().compute().dtype # no shape
def test_blockdims_from_blockshape():
assert blockdims_from_blockshape((10, 10), (4, 3)) == ((4, 4, 2), (3, 3, 3, 1))
assert raises(TypeError, lambda: blockdims_from_blockshape((10,), None))
def test_compute():
d = da.ones((4, 4), chunks=(2, 2))
a, b = d + 1, d + 2
A, B = compute(a, b)
assert eq(A, d + 1)
assert eq(B, d + 2)
A, = compute(a)
assert eq(A, d + 1)
def test_coerce():
d = da.from_array(np.array([1]), chunks=(1,))
with dask.set_options(get=dask.get):
assert bool(d)
assert int(d)
assert float(d)
assert complex(d)
def test_store():
d = da.ones((4, 4), chunks=(2, 2))
a, b = d + 1, d + 2
at = np.empty(shape=(4, 4))
bt = np.empty(shape=(4, 4))
store([a, b], [at, bt])
assert (at == 2).all()
assert (bt == 3).all()
assert raises(ValueError, lambda: store([a], [at, bt]))
assert raises(ValueError, lambda: store(at, at))
assert raises(ValueError, lambda: store([at, bt], [at, bt]))
def test_to_hdf5():
try:
import h5py
except ImportError:
return
x = da.ones((4, 4), chunks=(2, 2))
with tmpfile('.hdf5') as fn:
x.to_hdf5(fn, '/x')
with h5py.File(fn) as f:
d = f['/x']
assert eq(d[:], x)
assert d.chunks == (2, 2)
def test_np_array_with_zero_dimensions():
d = da.ones((4, 4), chunks=(2, 2))
assert eq(np.array(d.sum()), np.array(d.compute().sum()))
def test_unique():
x = np.array([1, 2, 4, 4, 5, 2])
d = da.from_array(x, chunks=(3,))
assert eq(da.unique(d), np.unique(x))
def test_dtype_complex():
x = np.arange(24).reshape((4, 6)).astype('f4')
y = np.arange(24).reshape((4, 6)).astype('i8')
z = np.arange(24).reshape((4, 6)).astype('i2')
a = da.from_array(x, chunks=(2, 3))
b = da.from_array(y, chunks=(2, 3))
c = da.from_array(z, chunks=(2, 3))
def eq(a, b):
return (isinstance(a, np.dtype) and
isinstance(b, np.dtype) and
str(a) == str(b))
assert eq(a._dtype, x.dtype)
assert eq(b._dtype, y.dtype)
assert eq((a + 1)._dtype, (x + 1).dtype)
assert eq((a + b)._dtype, (x + y).dtype)
assert eq(a.T._dtype, x.T.dtype)
assert eq(a[:3]._dtype, x[:3].dtype)
assert eq((a.dot(b.T))._dtype, (x.dot(y.T)).dtype)
assert eq(stack([a, b])._dtype, np.vstack([x, y]).dtype)
assert eq(concatenate([a, b])._dtype, np.concatenate([x, y]).dtype)
assert eq(b.std()._dtype, y.std().dtype)
assert eq(c.sum()._dtype, z.sum().dtype)
assert eq(a.min()._dtype, a.min().dtype)
assert eq(b.std()._dtype, b.std().dtype)
assert eq(a.argmin(axis=0)._dtype, a.argmin(axis=0).dtype)
    assert eq(da.sin(c)._dtype, np.sin(z).dtype)
assert eq(da.exp(b)._dtype, np.exp(y).dtype)
assert eq(da.floor(a)._dtype, np.floor(x).dtype)
assert eq(da.isnan(b)._dtype, np.isnan(y).dtype)
with ignoring(ImportError):
assert da.isnull(b)._dtype == 'bool'
assert da.notnull(b)._dtype == 'bool'
x = np.array([('a', 1)], dtype=[('text', 'S1'), ('numbers', 'i4')])
d = da.from_array(x, chunks=(1,))
assert eq(d['text']._dtype, x['text'].dtype)
assert eq(d[['numbers', 'text']]._dtype, x[['numbers', 'text']].dtype)
def test_astype():
x = np.ones(5, dtype='f4')
d = da.from_array(x, chunks=(2,))
assert d.astype('i8')._dtype == 'i8'
assert eq(d.astype('i8'), x.astype('i8'))
def test_arithmetic():
x = np.arange(5).astype('f4') + 2
y = np.arange(5).astype('i8') + 2
z = np.arange(5).astype('i4') + 2
a = da.from_array(x, chunks=(2,))
b = da.from_array(y, chunks=(2,))
c = da.from_array(z, chunks=(2,))
assert eq(a + b, x + y)
assert eq(a * b, x * y)
assert eq(a - b, x - y)
assert eq(a / b, x / y)
assert eq(b & b, y & y)
assert eq(b | b, y | y)
assert eq(b ^ b, y ^ y)
assert eq(a // b, x // y)
assert eq(a ** b, x ** y)
assert eq(a % b, x % y)
assert eq(a > b, x > y)
assert eq(a < b, x < y)
assert eq(a >= b, x >= y)
assert eq(a <= b, x <= y)
assert eq(a == b, x == y)
assert eq(a != b, x != y)
assert eq(a + 2, x + 2)
assert eq(a * 2, x * 2)
assert eq(a - 2, x - 2)
assert eq(a / 2, x / 2)
assert eq(b & True, y & True)
assert eq(b | True, y | True)
assert eq(b ^ True, y ^ True)
assert eq(a // 2, x // 2)
assert eq(a ** 2, x ** 2)
assert eq(a % 2, x % 2)
assert eq(a > 2, x > 2)
assert eq(a < 2, x < 2)
assert eq(a >= 2, x >= 2)
assert eq(a <= 2, x <= 2)
assert eq(a == 2, x == 2)
assert eq(a != 2, x != 2)
assert eq(2 + b, 2 + y)
assert eq(2 * b, 2 * y)
assert eq(2 - b, 2 - y)
assert eq(2 / b, 2 / y)
assert eq(True & b, True & y)
assert eq(True | b, True | y)
assert eq(True ^ b, True ^ y)
assert eq(2 // b, 2 // y)
assert eq(2 ** b, 2 ** y)
assert eq(2 % b, 2 % y)
assert eq(2 > b, 2 > y)
assert eq(2 < b, 2 < y)
assert eq(2 >= b, 2 >= y)
assert eq(2 <= b, 2 <= y)
assert eq(2 == b, 2 == y)
assert eq(2 != b, 2 != y)
assert eq(-a, -x)
assert eq(abs(a), abs(x))
assert eq(~(a == b), ~(x == y))
assert eq(~(a == b), ~(x == y))
assert eq(da.logaddexp(a, b), np.logaddexp(x, y))
assert eq(da.logaddexp2(a, b), np.logaddexp2(x, y))
assert eq(da.conj(a + 1j * b), np.conj(x + 1j * y))
assert eq(da.exp(b), np.exp(y))
assert eq(da.log(a), np.log(x))
assert eq(da.log10(a), np.log10(x))
assert eq(da.log1p(a), np.log1p(x))
assert eq(da.expm1(b), np.expm1(y))
assert eq(da.sqrt(a), np.sqrt(x))
assert eq(da.square(a), np.square(x))
assert eq(da.sin(a), np.sin(x))
assert eq(da.cos(b), np.cos(y))
assert eq(da.tan(a), np.tan(x))
assert eq(da.arcsin(b/10), np.arcsin(y/10))
assert eq(da.arccos(b/10), np.arccos(y/10))
assert eq(da.arctan(b/10), np.arctan(y/10))
assert eq(da.arctan2(b*10, a), np.arctan2(y*10, x))
assert eq(da.hypot(b, a), np.hypot(y, x))
assert eq(da.sinh(a), np.sinh(x))
assert eq(da.cosh(b), np.cosh(y))
assert eq(da.tanh(a), np.tanh(x))
assert eq(da.arcsinh(b*10), np.arcsinh(y*10))
assert eq(da.arccosh(b*10), np.arccosh(y*10))
assert eq(da.arctanh(b/10), np.arctanh(y/10))
assert eq(da.deg2rad(a), np.deg2rad(x))
assert eq(da.rad2deg(a), np.rad2deg(x))
assert eq(da.logical_and(a < 1, b < 4), np.logical_and(x < 1, y < 4))
assert eq(da.logical_or(a < 1, b < 4), np.logical_or(x < 1, y < 4))
assert eq(da.logical_xor(a < 1, b < 4), np.logical_xor(x < 1, y < 4))
assert eq(da.logical_not(a < 1), np.logical_not(x < 1))
assert eq(da.maximum(a, 5 - a), np.maximum(a, 5 - a))
assert eq(da.minimum(a, 5 - a), np.minimum(a, 5 - a))
assert eq(da.fmax(a, 5 - a), np.fmax(a, 5 - a))
assert eq(da.fmin(a, 5 - a), np.fmin(a, 5 - a))
assert eq(da.isreal(a + 1j * b), np.isreal(x + 1j * y))
assert eq(da.iscomplex(a + 1j * b), np.iscomplex(x + 1j * y))
assert eq(da.isfinite(a), np.isfinite(x))
assert eq(da.isinf(a), np.isinf(x))
assert eq(da.isnan(a), np.isnan(x))
assert eq(da.signbit(a - 3), np.signbit(x - 3))
assert eq(da.copysign(a - 3, b), np.copysign(x - 3, y))
assert eq(da.nextafter(a - 3, b), np.nextafter(x - 3, y))
assert eq(da.ldexp(c, c), np.ldexp(z, z))
assert eq(da.fmod(a * 12, b), np.fmod(x * 12, y))
assert eq(da.floor(a * 0.5), np.floor(x * 0.5))
assert eq(da.ceil(a), np.ceil(x))
assert eq(da.trunc(a / 2), np.trunc(x / 2))
assert eq(da.degrees(b), np.degrees(y))
assert eq(da.radians(a), np.radians(x))
assert eq(da.rint(a + 0.3), np.rint(x + 0.3))
assert eq(da.fix(a - 2.5), np.fix(x - 2.5))
assert eq(da.angle(a + 1j), np.angle(x + 1j))
assert eq(da.real(a + 1j), np.real(x + 1j))
assert eq(da.imag(a + 1j), np.imag(x + 1j))
assert eq(da.clip(b, 1, 4), np.clip(y, 1, 4))
assert eq(da.fabs(b), np.fabs(y))
    assert eq(da.sign(b - 2), np.sign(y - 2))
l1, l2 = da.frexp(a)
r1, r2 = np.frexp(x)
assert eq(l1, r1)
assert eq(l2, r2)
l1, l2 = da.modf(a)
r1, r2 = np.modf(x)
assert eq(l1, r1)
assert eq(l2, r2)
assert eq(da.around(a, -1), np.around(x, -1))
def test_reductions_1d():
x = np.arange(5).astype('f4')
a = da.from_array(x, chunks=(2,))
assert eq(da.all(a), np.all(x))
assert eq(da.any(a), np.any(x))
assert eq(da.argmax(a, axis=0), np.argmax(x, axis=0))
assert eq(da.argmin(a, axis=0), np.argmin(x, axis=0))
assert eq(da.max(a), np.max(x))
assert eq(da.mean(a), np.mean(x))
assert eq(da.min(a), np.min(x))
assert eq(da.nanargmax(a, axis=0), np.nanargmax(x, axis=0))
assert eq(da.nanargmin(a, axis=0), np.nanargmin(x, axis=0))
assert eq(da.nanmax(a), np.nanmax(x))
assert eq(da.nanmin(a), np.nanmin(x))
assert eq(da.nansum(a), np.nansum(x))
assert eq(da.nanvar(a), np.nanvar(x))
assert eq(da.nanstd(a), np.nanstd(x))
def test_optimize():
x = np.arange(5).astype('f4')
a = da.from_array(x, chunks=(2,))
expr = a[1:4] + 1
result = optimize(expr.dask, expr._keys())
assert isinstance(result, dict)
assert all(key in result for key in expr._keys())
def test_slicing_with_non_ndarrays():
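    # from_array only needs .shape, .dtype and a slice-based __getitem__, so these
    # minimal duck-array classes are enough to build a dask array from.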
class ARangeSlice(object):
def __init__(self, start, stop):
self.start = start
self.stop = stop
def __array__(self):
return np.arange(self.start, self.stop)
class ARangeSlicable(object):
dtype = 'i8'
def __init__(self, n):
self.n = n
@property
def shape(self):
return (self.n,)
def __getitem__(self, key):
return ARangeSlice(key[0].start, key[0].stop)
x = da.from_array(ARangeSlicable(10), chunks=(4,))
assert eq((x + 1).sum(), (np.arange(10, dtype=x.dtype) + 1).sum())
def test_getarray():
assert type(getarray(np.matrix([[1]]), 0)) == np.ndarray
def test_squeeze():
x = da.ones((10, 1), chunks=(3, 1))
assert eq(x.squeeze(), x.compute().squeeze())
assert x.squeeze().chunks == ((3, 3, 3, 1),)
def test_size():
x = da.ones((10, 2), chunks=(3, 1))
assert x.size == np.array(x).size
def test_nbytes():
x = da.ones((10, 2), chunks=(3, 1))
assert x.nbytes == np.array(x).nbytes
def test_Array_normalizes_dtype():
x = da.ones((3,), chunks=(1,), dtype=int)
assert isinstance(x.dtype, np.dtype)
def test_args():
x = da.ones((10, 2), chunks=(3, 1), dtype='i4') + 1
y = Array(*x._args)
assert eq(x, y)
def test_from_array_with_lock():
x = np.arange(10)
d = da.from_array(x, chunks=5, lock=True)
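    # With lock=True every getarray task should carry the same shared Lock instance
    # as its fourth element.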
tasks = [v for k, v in d.dask.items() if k[0] == d.name]
assert isinstance(tasks[0][3], type(Lock()))
assert len(set(task[3] for task in tasks)) == 1
assert eq(d, x)
lock = Lock()
e = da.from_array(x, chunks=5, lock=lock)
f = da.from_array(x, chunks=5, lock=lock)
assert eq(e + f, x + x)
def test_topk():
x = np.array([5, 2, 1, 6])
d = da.from_array(x, chunks=2)
e = da.topk(2, d)
assert e.chunks == ((2,),)
assert eq(e, np.sort(x)[-1:-3:-1])
def test_bincount():
x = np.array([2, 1, 5, 2, 1])
d = da.from_array(x, chunks=2)
assert eq(da.bincount(d, minlength=6), np.bincount(x, minlength=6))
def test_bincount_with_weights():
x = np.array([2, 1, 5, 2, 1])
d = da.from_array(x, chunks=2)
weights = np.array([1, 2, 1, 0.5, 1])
dweights = da.from_array(weights, chunks=2)
assert eq(da.bincount(d, weights=dweights, minlength=6),
np.bincount(x, weights=dweights, minlength=6))
def test_bincount_raises_informative_error_on_missing_minlength_kwarg():
x = np.array([2, 1, 5, 2, 1])
d = da.from_array(x, chunks=2)
try:
da.bincount(d)
except Exception as e:
assert 'minlength' in str(e)
else:
assert False
def test_concatenate3():
x = np.array([1, 2])
assert eq(concatenate3([x, x, x]),
np.array([1, 2, 1, 2, 1, 2]))
x = np.array([[1, 2]])
assert (concatenate3([[x, x, x], [x, x, x]]) ==
np.array([[1, 2, 1, 2, 1, 2],
[1, 2, 1, 2, 1, 2]])).all()
assert (concatenate3([[x, x], [x, x], [x, x]]) ==
np.array([[1, 2, 1, 2],
[1, 2, 1, 2],
[1, 2, 1, 2]])).all()
x = np.arange(12).reshape((2, 2, 3))
assert eq(concatenate3([[[x, x, x],
[x, x, x]],
[[x, x, x],
[x, x, x]]]),
np.array([[[ 0, 1, 2, 0, 1, 2, 0, 1, 2],
[ 3, 4, 5, 3, 4, 5, 3, 4, 5],
[ 0, 1, 2, 0, 1, 2, 0, 1, 2],
[ 3, 4, 5, 3, 4, 5, 3, 4, 5]],
[[ 6, 7, 8, 6, 7, 8, 6, 7, 8],
[ 9, 10, 11, 9, 10, 11, 9, 10, 11],
[ 6, 7, 8, 6, 7, 8, 6, 7, 8],
[ 9, 10, 11, 9, 10, 11, 9, 10, 11]],
[[ 0, 1, 2, 0, 1, 2, 0, 1, 2],
[ 3, 4, 5, 3, 4, 5, 3, 4, 5],
[ 0, 1, 2, 0, 1, 2, 0, 1, 2],
[ 3, 4, 5, 3, 4, 5, 3, 4, 5]],
[[ 6, 7, 8, 6, 7, 8, 6, 7, 8],
[ 9, 10, 11, 9, 10, 11, 9, 10, 11],
[ 6, 7, 8, 6, 7, 8, 6, 7, 8],
[ 9, 10, 11, 9, 10, 11, 9, 10, 11]]]))
def test_map_blocks3():
x = np.arange(10)
y = np.arange(10) * 2
d = da.from_array(x, chunks=5)
e = da.from_array(y, chunks=5)
assert eq(da.core.map_blocks(lambda a, b: a+2*b, d, e, dtype=d.dtype),
x + 2*y)
z = np.arange(100).reshape((10, 10))
f = da.from_array(z, chunks=5)
assert eq(da.core.map_blocks(lambda a, b: a+2*b, d, f, dtype=d.dtype),
x + 2*z)
| {
"repo_name": "marianotepper/dask",
"path": "dask/array/tests/test_array_core.py",
"copies": "2",
"size": "31939",
"license": "bsd-3-clause",
"hash": 797746825579494100,
"line_mean": 29.1311320755,
"line_max": 83,
"alpha_frac": 0.4914681111,
"autogenerated": false,
"ratio": 2.62829163923634,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9114052465317068,
"avg_score": 0.0011414570038543075,
"num_lines": 1060
} |
from __future__ import absolute_import, division, print_function
import pytest
pytest.importorskip('numpy')
from operator import add, sub
from tempfile import mkdtemp
import shutil
import os
from toolz import merge
from toolz.curried import identity
import dask
import dask.array as da
from dask.async import get_sync
from dask.array.core import *
from dask.utils import raises, ignoring, tmpfile
inc = lambda x: x + 1
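# Helper: check that two dask objects produce identical task keys (deterministic names).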
def same_keys(a, b):
def key(k):
if isinstance(k, str):
return (k, -1, -1, -1)
else:
return k
return sorted(a.dask, key=key) == sorted(b.dask, key=key)
def test_getem():
assert getem('X', (2, 3), shape=(4, 6)) == \
{('X', 0, 0): (getarray, 'X', (slice(0, 2), slice(0, 3))),
('X', 1, 0): (getarray, 'X', (slice(2, 4), slice(0, 3))),
('X', 1, 1): (getarray, 'X', (slice(2, 4), slice(3, 6))),
('X', 0, 1): (getarray, 'X', (slice(0, 2), slice(3, 6)))}
def test_top():
assert top(inc, 'z', 'ij', 'x', 'ij', numblocks={'x': (2, 2)}) == \
{('z', 0, 0): (inc, ('x', 0, 0)),
('z', 0, 1): (inc, ('x', 0, 1)),
('z', 1, 0): (inc, ('x', 1, 0)),
('z', 1, 1): (inc, ('x', 1, 1))}
assert top(add, 'z', 'ij', 'x', 'ij', 'y', 'ij',
numblocks={'x': (2, 2), 'y': (2, 2)}) == \
{('z', 0, 0): (add, ('x', 0, 0), ('y', 0, 0)),
('z', 0, 1): (add, ('x', 0, 1), ('y', 0, 1)),
('z', 1, 0): (add, ('x', 1, 0), ('y', 1, 0)),
('z', 1, 1): (add, ('x', 1, 1), ('y', 1, 1))}
assert top(dotmany, 'z', 'ik', 'x', 'ij', 'y', 'jk',
numblocks={'x': (2, 2), 'y': (2, 2)}) == \
{('z', 0, 0): (dotmany, [('x', 0, 0), ('x', 0, 1)],
[('y', 0, 0), ('y', 1, 0)]),
('z', 0, 1): (dotmany, [('x', 0, 0), ('x', 0, 1)],
[('y', 0, 1), ('y', 1, 1)]),
('z', 1, 0): (dotmany, [('x', 1, 0), ('x', 1, 1)],
[('y', 0, 0), ('y', 1, 0)]),
('z', 1, 1): (dotmany, [('x', 1, 0), ('x', 1, 1)],
[('y', 0, 1), ('y', 1, 1)])}
assert top(identity, 'z', '', 'x', 'ij', numblocks={'x': (2, 2)}) ==\
{('z',): (identity, [[('x', 0, 0), ('x', 0, 1)],
[('x', 1, 0), ('x', 1, 1)]])}
def test_top_supports_broadcasting_rules():
assert top(add, 'z', 'ij', 'x', 'ij', 'y', 'ij',
numblocks={'x': (1, 2), 'y': (2, 1)}) == \
{('z', 0, 0): (add, ('x', 0, 0), ('y', 0, 0)),
('z', 0, 1): (add, ('x', 0, 1), ('y', 0, 0)),
('z', 1, 0): (add, ('x', 0, 0), ('y', 1, 0)),
('z', 1, 1): (add, ('x', 0, 1), ('y', 1, 0))}
def test_concatenate3():
x = np.array([1, 2])
assert concatenate3([[x, x, x],
[x, x, x]]).shape == (2, 6)
x = np.array([[1, 2]])
assert concatenate3([[x, x, x],
[x, x, x]]).shape == (2, 6)
def test_concatenate3_on_scalars():
assert eq(concatenate3([1, 2]), np.array([1, 2]))
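# Comparison helper: require matching dtype strings, then compare values with allclose
# (or elementwise equality).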
def eq(a, b):
if isinstance(a, Array):
adt = a._dtype
a = a.compute(get=dask.get)
else:
adt = getattr(a, 'dtype', None)
if isinstance(b, Array):
bdt = b._dtype
b = b.compute(get=dask.get)
else:
bdt = getattr(b, 'dtype', None)
if not str(adt) == str(bdt):
return False
try:
return np.allclose(a, b)
except TypeError:
pass
c = a == b
if isinstance(c, np.ndarray):
return c.all()
else:
return c
def test_chunked_dot_product():
x = np.arange(400).reshape((20, 20))
o = np.ones((20, 20))
d = {'x': x, 'o': o}
getx = getem('x', (5, 5), shape=(20, 20))
geto = getem('o', (5, 5), shape=(20, 20))
result = top(dotmany, 'out', 'ik', 'x', 'ij', 'o', 'jk',
numblocks={'x': (4, 4), 'o': (4, 4)})
dsk = merge(d, getx, geto, result)
out = dask.get(dsk, [[('out', i, j) for j in range(4)] for i in range(4)])
assert eq(np.dot(x, o), concatenate3(out))
def test_chunked_transpose_plus_one():
x = np.arange(400).reshape((20, 20))
d = {'x': x}
getx = getem('x', (5, 5), shape=(20, 20))
f = lambda x: x.T + 1
comp = top(f, 'out', 'ij', 'x', 'ji', numblocks={'x': (4, 4)})
dsk = merge(d, getx, comp)
out = dask.get(dsk, [[('out', i, j) for j in range(4)] for i in range(4)])
assert eq(concatenate3(out), x.T + 1)
def test_transpose():
x = np.arange(240).reshape((4, 6, 10))
d = da.from_array(x, (2, 3, 4))
assert eq(d.transpose((2, 0, 1)),
x.transpose((2, 0, 1)))
assert same_keys(d.transpose((2, 0, 1)), d.transpose((2, 0, 1)))
def test_broadcast_dimensions_works_with_singleton_dimensions():
argpairs = [('x', 'i')]
numblocks = {'x': ((1,),)}
assert broadcast_dimensions(argpairs, numblocks) == {'i': (1,)}
def test_broadcast_dimensions():
argpairs = [('x', 'ij'), ('y', 'ij')]
d = {'x': ('Hello', 1), 'y': (1, (2, 3))}
assert broadcast_dimensions(argpairs, d) == {'i': 'Hello', 'j': (2, 3)}
def test_Array():
shape = (1000, 1000)
chunks = (100, 100)
name = 'x'
dsk = merge({name: 'some-array'}, getem(name, chunks, shape=shape))
a = Array(dsk, name, chunks, shape=shape)
assert a.numblocks == (10, 10)
assert a._keys() == [[('x', i, j) for j in range(10)]
for i in range(10)]
assert a.chunks == ((100,) * 10, (100,) * 10)
assert a.shape == shape
assert len(a) == shape[0]
def test_uneven_chunks():
a = Array({}, 'x', chunks=(3, 3), shape=(10, 10))
assert a.chunks == ((3, 3, 3, 1), (3, 3, 3, 1))
def test_numblocks_supports_singleton_block_dims():
shape = (100, 10)
chunks = (10, 10)
name = 'x'
dsk = merge({name: 'some-array'}, getem(name, shape=shape, chunks=chunks))
a = Array(dsk, name, chunks, shape=shape)
assert set(concat(a._keys())) == set([('x', i, 0) for i in range(100//10)])
def test_keys():
dsk = dict((('x', i, j), ()) for i in range(5) for j in range(6))
dx = Array(dsk, 'x', chunks=(10, 10), shape=(50, 60))
assert dx._keys() == [[(dx.name, i, j) for j in range(6)]
for i in range(5)]
d = Array({}, 'x', (), shape=())
assert d._keys() == [('x',)]
def test_Array_computation():
a = Array({('x', 0, 0): np.eye(3)}, 'x', shape=(3, 3), chunks=(3, 3))
assert eq(np.array(a), np.eye(3))
assert isinstance(a.compute(), np.ndarray)
assert float(a[0, 0]) == 1
def test_stack():
a, b, c = [Array(getem(name, chunks=(2, 3), shape=(4, 6)),
name, shape=(4, 6), chunks=(2, 3))
for name in 'ABC']
s = stack([a, b, c], axis=0)
colon = slice(None, None, None)
assert s.shape == (3, 4, 6)
assert s.chunks == ((1, 1, 1), (2, 2), (3, 3))
assert s.dask[(s.name, 0, 1, 0)] == (getarray, ('A', 1, 0),
(None, colon, colon))
assert s.dask[(s.name, 2, 1, 0)] == (getarray, ('C', 1, 0),
(None, colon, colon))
assert same_keys(s, stack([a, b, c], axis=0))
s2 = stack([a, b, c], axis=1)
assert s2.shape == (4, 3, 6)
assert s2.chunks == ((2, 2), (1, 1, 1), (3, 3))
assert s2.dask[(s2.name, 0, 1, 0)] == (getarray, ('B', 0, 0),
(colon, None, colon))
assert s2.dask[(s2.name, 1, 1, 0)] == (getarray, ('B', 1, 0),
(colon, None, colon))
assert same_keys(s2, stack([a, b, c], axis=1))
s2 = stack([a, b, c], axis=2)
assert s2.shape == (4, 6, 3)
assert s2.chunks == ((2, 2), (3, 3), (1, 1, 1))
assert s2.dask[(s2.name, 0, 1, 0)] == (getarray, ('A', 0, 1),
(colon, colon, None))
assert s2.dask[(s2.name, 1, 1, 2)] == (getarray, ('C', 1, 1),
(colon, colon, None))
assert same_keys(s2, stack([a, b, c], axis=2))
assert raises(ValueError, lambda: stack([a, b, c], axis=3))
assert set(b.dask.keys()).issubset(s2.dask.keys())
assert stack([a, b, c], axis=-1).chunks == \
stack([a, b, c], axis=2).chunks
def test_short_stack():
x = np.array([1])
d = da.from_array(x, chunks=(1,))
s = da.stack([d])
assert s.shape == (1, 1)
assert Array._get(s.dask, s._keys())[0][0].shape == (1, 1)
def test_stack_scalars():
d = da.arange(4, chunks=2)
s = da.stack([d.mean(), d.sum()])
assert s.compute().tolist() == [np.arange(4).mean(), np.arange(4).sum()]
def test_concatenate():
a, b, c = [Array(getem(name, chunks=(2, 3), shape=(4, 6)),
name, shape=(4, 6), chunks=(2, 3))
for name in 'ABC']
x = concatenate([a, b, c], axis=0)
assert x.shape == (12, 6)
assert x.chunks == ((2, 2, 2, 2, 2, 2), (3, 3))
assert x.dask[(x.name, 0, 1)] == ('A', 0, 1)
assert x.dask[(x.name, 5, 0)] == ('C', 1, 0)
assert same_keys(x, concatenate([a, b, c], axis=0))
y = concatenate([a, b, c], axis=1)
assert y.shape == (4, 18)
assert y.chunks == ((2, 2), (3, 3, 3, 3, 3, 3))
assert y.dask[(y.name, 1, 0)] == ('A', 1, 0)
assert y.dask[(y.name, 1, 5)] == ('C', 1, 1)
assert same_keys(y, concatenate([a, b, c], axis=1))
assert set(b.dask.keys()).issubset(y.dask.keys())
assert concatenate([a, b, c], axis=-1).chunks == \
concatenate([a, b, c], axis=1).chunks
assert raises(ValueError, lambda: concatenate([a, b, c], axis=2))
def test_vstack():
x = np.arange(5)
y = np.ones(5)
a = da.arange(5, chunks=2)
b = da.ones(5, chunks=2)
assert eq(np.vstack((x, y)), da.vstack((a, b)))
assert eq(np.vstack((x, y[None, :])), da.vstack((a, b[None, :])))
def test_hstack():
x = np.arange(5)
y = np.ones(5)
a = da.arange(5, chunks=2)
b = da.ones(5, chunks=2)
assert eq(np.hstack((x[None, :], y[None, :])),
da.hstack((a[None, :], b[None, :])))
assert eq(np.hstack((x, y)), da.hstack((a, b)))
def test_dstack():
x = np.arange(5)
y = np.ones(5)
a = da.arange(5, chunks=2)
b = da.ones(5, chunks=2)
assert eq(np.dstack((x[None, None, :], y[None, None, :])),
da.dstack((a[None, None, :], b[None, None, :])))
assert eq(np.dstack((x[None, :], y[None, :])),
da.dstack((a[None, :], b[None, :])))
assert eq(np.dstack((x, y)), da.dstack((a, b)))
def test_take():
x = np.arange(400).reshape((20, 20))
a = from_array(x, chunks=(5, 5))
assert eq(np.take(x, 3, axis=0), take(a, 3, axis=0))
assert eq(np.take(x, [3, 4, 5], axis=-1), take(a, [3, 4, 5], axis=-1))
assert raises(ValueError, lambda: take(a, 3, axis=2))
assert same_keys(take(a, [3, 4, 5], axis=-1), take(a, [3, 4, 5], axis=-1))
def test_compress():
x = np.arange(25).reshape((5, 5))
a = from_array(x, chunks=(2, 2))
assert eq(np.compress([True, False, True, False, True], x, axis=0),
da.compress([True, False, True, False, True], a, axis=0))
assert eq(np.compress([True, False, True, False, True], x, axis=1),
da.compress([True, False, True, False, True], a, axis=1))
assert eq(np.compress([True, False], x, axis=1),
da.compress([True, False], a, axis=1))
with pytest.raises(NotImplementedError):
da.compress([True, False], a)
with pytest.raises(ValueError):
da.compress([True, False], a, axis=100)
with pytest.raises(ValueError):
da.compress([[True], [False]], a, axis=100)
def test_binops():
a = Array(dict((('a', i), np.array([''])) for i in range(3)),
'a', chunks=((1, 1, 1),))
b = Array(dict((('b', i), np.array([''])) for i in range(3)),
'b', chunks=((1, 1, 1),))
result = elemwise(add, a, b, name='c')
assert result.dask == merge(a.dask, b.dask,
dict((('c', i), (add, ('a', i), ('b', i)))
for i in range(3)))
result = elemwise(pow, a, 2, name='c')
assert result.dask[('c', 0)][1] == ('a', 0)
f = result.dask[('c', 0)][0]
assert f(10) == 100
def test_isnull():
x = np.array([1, np.nan])
a = from_array(x, chunks=(2,))
with ignoring(ImportError):
assert eq(isnull(a), np.isnan(x))
assert eq(notnull(a), ~np.isnan(x))
def test_isclose():
x = np.array([0, np.nan, 1, 1.5])
y = np.array([1e-9, np.nan, 1, 2])
a = from_array(x, chunks=(2,))
b = from_array(y, chunks=(2,))
assert eq(da.isclose(a, b, equal_nan=True),
np.isclose(x, y, equal_nan=True))
def test_broadcast_shapes():
assert (3, 4, 5) == broadcast_shapes((3, 4, 5), (4, 1), ())
assert (3, 4) == broadcast_shapes((3, 1), (1, 4), (4,))
assert (5, 6, 7, 3, 4) == broadcast_shapes((3, 1), (), (5, 6, 7, 1, 4))
assert raises(ValueError, lambda: broadcast_shapes((3,), (3, 4)))
assert raises(ValueError, lambda: broadcast_shapes((2, 3), (2, 3, 1)))
def test_elemwise_on_scalars():
x = np.arange(10)
a = from_array(x, chunks=(5,))
assert len(a._keys()) == 2
assert eq(a.sum()**2, x.sum()**2)
x = np.arange(11)
a = from_array(x, chunks=(5,))
assert len(a._keys()) == 3
assert eq(a, x)
def test_partial_by_order():
f = partial_by_order(add, [(1, 20)])
assert f(5) == 25
assert f.__name__ == 'add(20)'
f = partial_by_order(lambda x, y, z: x + y + z, [(1, 10), (2, 15)])
assert f(3) == 28
assert f.__name__ == '<lambda>(...)'
assert raises(ValueError, lambda: partial_by_order(add, 1))
assert raises(ValueError, lambda: partial_by_order(add, [1]))
def test_elemwise_with_ndarrays():
x = np.arange(3)
y = np.arange(12).reshape(4, 3)
a = from_array(x, chunks=(3,))
b = from_array(y, chunks=(2, 3))
assert eq(x + a, 2 * x)
assert eq(a + x, 2 * x)
assert eq(x + b, x + y)
assert eq(b + x, x + y)
assert eq(a + y, x + y)
assert eq(y + a, x + y)
# Error on shape mismatch
assert raises(ValueError, lambda: a + y.T)
assert raises(ValueError, lambda: a + np.arange(2))
def test_elemwise_differently_chunked():
x = np.arange(3)
y = np.arange(12).reshape(4, 3)
a = from_array(x, chunks=(3,))
b = from_array(y, chunks=(2, 2))
assert eq(a + b, x + y)
assert eq(b + a, x + y)
def test_operators():
x = np.arange(10)
y = np.arange(10).reshape((10, 1))
a = from_array(x, chunks=(5,))
b = from_array(y, chunks=(5, 1))
c = a + 1
assert eq(c, x + 1)
c = a + b
assert eq(c, x + x.reshape((10, 1)))
expr = (3 / a * b)**2 > 5
assert eq(expr, (3 / x * y)**2 > 5)
c = exp(a)
assert eq(c, np.exp(x))
assert eq(abs(-a), a)
assert eq(a, +x)
def test_operator_dtype_promotion():
x = np.arange(10, dtype=np.float32)
y = np.array([1])
a = from_array(x, chunks=(5,))
assert eq(x + 1, a + 1) # still float32
assert eq(x + 1e50, a + 1e50) # now float64
assert eq(x + y, a + y) # also float64
def test_field_access():
x = np.array([(1, 1.0), (2, 2.0)], dtype=[('a', 'i4'), ('b', 'f4')])
y = from_array(x, chunks=(1,))
assert eq(y['a'], x['a'])
assert eq(y[['b', 'a']], x[['b', 'a']])
assert same_keys(y[['b', 'a']], y[['b', 'a']])
def test_tensordot():
x = np.arange(400).reshape((20, 20))
a = from_array(x, chunks=(5, 5))
y = np.arange(200).reshape((20, 10))
b = from_array(y, chunks=(5, 5))
assert eq(tensordot(a, b, axes=1), np.tensordot(x, y, axes=1))
assert eq(tensordot(a, b, axes=(1, 0)), np.tensordot(x, y, axes=(1, 0)))
assert same_keys(tensordot(a, b, axes=(1, 0)), tensordot(a, b, axes=(1, 0)))
assert not same_keys(tensordot(a, b, axes=0), tensordot(a, b, axes=1))
# assert (tensordot(a, a).chunks
# == tensordot(a, a, axes=((1, 0), (0, 1))).chunks)
# assert eq(tensordot(a, a), np.tensordot(x, x))
def test_dot_method():
x = np.arange(400).reshape((20, 20))
a = from_array(x, chunks=(5, 5))
y = np.arange(200).reshape((20, 10))
b = from_array(y, chunks=(5, 5))
assert eq(a.dot(b), x.dot(y))
def test_T():
x = np.arange(400).reshape((20, 20))
a = from_array(x, chunks=(5, 5))
assert eq(x.T, a.T)
def test_norm():
a = np.arange(200, dtype='f8').reshape((20, 10))
b = from_array(a, chunks=(5, 5))
assert eq(b.vnorm(), np.linalg.norm(a))
assert eq(b.vnorm(ord=1), np.linalg.norm(a.flatten(), ord=1))
assert eq(b.vnorm(ord=4, axis=0), np.linalg.norm(a, ord=4, axis=0))
assert b.vnorm(ord=4, axis=0, keepdims=True).ndim == b.ndim
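    # split_every caps how many input blocks are combined per step of the tree
    # reduction (here specified per axis).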
split_every = {0: 3, 1: 3}
assert eq(b.vnorm(ord=1, axis=0, split_every=split_every),
np.linalg.norm(a, ord=1, axis=0))
assert eq(b.vnorm(ord=np.inf, axis=0, split_every=split_every),
np.linalg.norm(a, ord=np.inf, axis=0))
assert eq(b.vnorm(ord=np.inf, split_every=split_every),
np.linalg.norm(a.flatten(), ord=np.inf))
def test_choose():
x = np.random.randint(10, size=(15, 16))
d = from_array(x, chunks=(4, 5))
assert eq(choose(d > 5, [0, d]), np.choose(x > 5, [0, x]))
assert eq(choose(d > 5, [-d, d]), np.choose(x > 5, [-x, x]))
def test_where():
x = np.random.randint(10, size=(15, 16))
d = from_array(x, chunks=(4, 5))
y = np.random.randint(10, size=15)
e = from_array(y, chunks=(4,))
assert eq(where(d > 5, d, 0), np.where(x > 5, x, 0))
assert eq(where(d > 5, d, -e[:, None]), np.where(x > 5, x, -y[:, None]))
def test_where_has_informative_error():
x = da.ones(5, chunks=3)
try:
result = da.where(x > 0)
except Exception as e:
assert 'dask' in str(e)
def test_coarsen():
x = np.random.randint(10, size=(24, 24))
d = from_array(x, chunks=(4, 8))
assert eq(chunk.coarsen(np.sum, x, {0: 2, 1: 4}),
coarsen(np.sum, d, {0: 2, 1: 4}))
assert eq(chunk.coarsen(np.sum, x, {0: 2, 1: 4}),
coarsen(da.sum, d, {0: 2, 1: 4}))
def test_coarsen_with_excess():
x = da.arange(10, chunks=5)
assert eq(coarsen(np.min, x, {0: 3}, trim_excess=True),
np.array([0, 5]))
assert eq(coarsen(np.sum, x, {0: 3}, trim_excess=True),
np.array([0+1+2, 5+6+7]))
def test_insert():
x = np.random.randint(10, size=(10, 10))
a = from_array(x, chunks=(5, 5))
y = np.random.randint(10, size=(5, 10))
b = from_array(y, chunks=(4, 4))
assert eq(np.insert(x, 0, -1, axis=0), insert(a, 0, -1, axis=0))
assert eq(np.insert(x, 3, -1, axis=-1), insert(a, 3, -1, axis=-1))
assert eq(np.insert(x, 5, -1, axis=1), insert(a, 5, -1, axis=1))
assert eq(np.insert(x, -1, -1, axis=-2), insert(a, -1, -1, axis=-2))
assert eq(np.insert(x, [2, 3, 3], -1, axis=1),
insert(a, [2, 3, 3], -1, axis=1))
assert eq(np.insert(x, [2, 3, 8, 8, -2, -2], -1, axis=0),
insert(a, [2, 3, 8, 8, -2, -2], -1, axis=0))
assert eq(np.insert(x, slice(1, 4), -1, axis=1),
insert(a, slice(1, 4), -1, axis=1))
assert eq(np.insert(x, [2] * 3 + [5] * 2, y, axis=0),
insert(a, [2] * 3 + [5] * 2, b, axis=0))
assert eq(np.insert(x, 0, y[0], axis=1),
insert(a, 0, b[0], axis=1))
assert raises(NotImplementedError, lambda: insert(a, [4, 2], -1, axis=0))
assert raises(IndexError, lambda: insert(a, [3], -1, axis=2))
assert raises(IndexError, lambda: insert(a, [3], -1, axis=-3))
assert same_keys(insert(a, [2, 3, 8, 8, -2, -2], -1, axis=0),
insert(a, [2, 3, 8, 8, -2, -2], -1, axis=0))
def test_multi_insert():
z = np.random.randint(10, size=(1, 2))
c = from_array(z, chunks=(1, 2))
assert eq(np.insert(np.insert(z, [0, 1], -1, axis=0), [1], -1, axis=1),
insert(insert(c, [0, 1], -1, axis=0), [1], -1, axis=1))
def test_broadcast_to():
x = np.random.randint(10, size=(5, 1, 6))
a = from_array(x, chunks=(3, 1, 3))
for shape in [(5, 4, 6), (2, 5, 1, 6), (3, 4, 5, 4, 6)]:
assert eq(chunk.broadcast_to(x, shape),
broadcast_to(a, shape))
assert raises(ValueError, lambda: broadcast_to(a, (2, 1, 6)))
assert raises(ValueError, lambda: broadcast_to(a, (3,)))
def test_ravel():
x = np.random.randint(10, size=(4, 6))
# 2d
# these should use the shortcut
for chunks in [(4, 6), (2, 6)]:
a = from_array(x, chunks=chunks)
assert eq(x.ravel(), a.ravel())
assert len(a.ravel().dask) == len(a.dask) + len(a.chunks[0])
# these cannot
for chunks in [(4, 2), (2, 2)]:
a = from_array(x, chunks=chunks)
assert eq(x.ravel(), a.ravel())
assert len(a.ravel().dask) > len(a.dask) + len(a.chunks[0])
# 0d
assert eq(x[0, 0].ravel(), a[0, 0].ravel())
# 1d
a_flat = a.ravel()
assert a_flat.ravel() is a_flat
# 3d
x = np.random.randint(10, size=(2, 3, 4))
for chunks in [2, 4, (2, 3, 2), (1, 3, 4)]:
a = from_array(x, chunks=chunks)
assert eq(x.ravel(), a.ravel())
assert eq(x.flatten(), a.flatten())
assert eq(np.ravel(x), da.ravel(a))
def test_unravel():
x = np.random.randint(10, size=24)
# these should use the shortcut
for chunks, shape in [(24, (3, 8)),
(24, (12, 2)),
(6, (4, 6)),
(6, (4, 3, 2)),
(6, (4, 6, 1)),
(((6, 12, 6),), (4, 6))]:
a = from_array(x, chunks=chunks)
unraveled = unravel(a, shape)
assert eq(x.reshape(*shape), unraveled)
assert len(unraveled.dask) == len(a.dask) + len(a.chunks[0])
# these cannot
for chunks, shape in [(6, (2, 12)),
(6, (1, 4, 6)),
(6, (2, 1, 12))]:
a = from_array(x, chunks=chunks)
unraveled = unravel(a, shape)
assert eq(x.reshape(*shape), unraveled)
assert len(unraveled.dask) > len(a.dask) + len(a.chunks[0])
assert raises(AssertionError, lambda: unravel(unraveled, (3, 8)))
assert unravel(a, a.shape) is a
def test_reshape():
shapes = [(24,), (2, 12), (2, 3, 4)]
for original_shape in shapes:
for new_shape in shapes:
for chunks in [2, 4, 12]:
x = np.random.randint(10, size=original_shape)
a = from_array(x, chunks)
assert eq(x.reshape(new_shape), a.reshape(new_shape))
assert raises(ValueError, lambda: reshape(a, (100,)))
assert eq(x.reshape(*new_shape), a.reshape(*new_shape))
assert eq(np.reshape(x, new_shape), reshape(a, new_shape))
# verify we can reshape a single chunk array without too many tasks
x = np.random.randint(10, size=(10, 20))
a = from_array(x, 20) # all one chunk
reshaped = a.reshape((20, 10))
assert eq(x.reshape((20, 10)), reshaped)
assert len(reshaped.dask) == len(a.dask) + 2
def test_reshape_unknown_dimensions():
for original_shape in [(24,), (2, 12), (2, 3, 4)]:
for new_shape in [(-1,), (2, -1), (-1, 3, 4)]:
x = np.random.randint(10, size=original_shape)
a = from_array(x, 4)
assert eq(x.reshape(new_shape), a.reshape(new_shape))
assert raises(ValueError, lambda: reshape(a, (-1, -1)))
def test_full():
d = da.full((3, 4), 2, chunks=((2, 1), (2, 2)))
assert d.chunks == ((2, 1), (2, 2))
assert eq(d, np.full((3, 4), 2))
def test_map_blocks():
inc = lambda x: x + 1
x = np.arange(400).reshape((20, 20))
d = from_array(x, chunks=(7, 7))
e = d.map_blocks(inc, dtype=d.dtype)
assert d.chunks == e.chunks
assert eq(e, x + 1)
e = d.map_blocks(inc, name='increment')
assert e.name == 'increment'
d = from_array(x, chunks=(10, 10))
e = d.map_blocks(lambda x: x[::2, ::2], chunks=(5, 5), dtype=d.dtype)
assert e.chunks == ((5, 5), (5, 5))
assert eq(e, x[::2, ::2])
d = from_array(x, chunks=(8, 8))
e = d.map_blocks(lambda x: x[::2, ::2], chunks=((4, 4, 2), (4, 4, 2)),
dtype=d.dtype)
assert eq(e, x[::2, ::2])
def test_map_blocks2():
x = np.arange(10, dtype='i8')
d = from_array(x, chunks=(2,))
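    # map_blocks supplies the block index tuple via the block_id keyword.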
def func(block, block_id=None):
return np.ones_like(block) * sum(block_id)
out = d.map_blocks(func, dtype='i8')
expected = np.array([0, 0, 1, 1, 2, 2, 3, 3, 4, 4], dtype='i8')
assert eq(out, expected)
assert same_keys(d.map_blocks(func, dtype='i8'), out)
def test_map_blocks_with_constants():
d = da.arange(10, chunks=3)
e = d.map_blocks(add, 100, dtype=d.dtype)
assert eq(e, np.arange(10) + 100)
assert eq(da.map_blocks(sub, d, 10, dtype=d.dtype),
np.arange(10) - 10)
assert eq(da.map_blocks(sub, 10, d, dtype=d.dtype),
10 - np.arange(10))
def test_map_blocks_with_kwargs():
d = da.arange(10, chunks=5)
assert eq(d.map_blocks(np.max, axis=0, keepdims=True, dtype=d.dtype),
np.array([4, 9]))
def test_fromfunction():
def f(x, y):
return x + y
d = fromfunction(f, shape=(5, 5), chunks=(2, 2), dtype='f8')
assert eq(d, np.fromfunction(f, shape=(5, 5)))
assert same_keys(d, fromfunction(f, shape=(5, 5), chunks=(2, 2), dtype='f8'))
def test_from_function_requires_block_args():
x = np.arange(10)
assert raises(Exception, lambda: from_array(x))
def test_repr():
d = da.ones((4, 4), chunks=(2, 2))
assert d.name[:5] in repr(d)
assert str(d.shape) in repr(d)
assert str(d._dtype) in repr(d)
d = da.ones((4000, 4), chunks=(4, 2))
assert len(str(d)) < 1000
def test_slicing_with_ellipsis():
x = np.arange(256).reshape((4, 4, 4, 4))
d = da.from_array(x, chunks=((2, 2, 2, 2)))
assert eq(d[..., 1], x[..., 1])
assert eq(d[0, ..., 1], x[0, ..., 1])
def test_slicing_with_ndarray():
x = np.arange(64).reshape((8, 8))
d = da.from_array(x, chunks=((4, 4)))
assert eq(d[np.arange(8)], x)
assert eq(d[np.ones(8, dtype=bool)], x)
def test_dtype():
d = da.ones((4, 4), chunks=(2, 2))
assert d.dtype == d.compute().dtype
assert (d * 1.0).dtype == (d + 1.0).compute().dtype
assert d.sum().dtype == d.sum().compute().dtype # no shape
def test_blockdims_from_blockshape():
assert blockdims_from_blockshape((10, 10), (4, 3)) == ((4, 4, 2), (3, 3, 3, 1))
assert raises(TypeError, lambda: blockdims_from_blockshape((10,), None))
assert blockdims_from_blockshape((1e2, 3), [1e1, 3]) == ((10,)*10, (3,))
assert blockdims_from_blockshape((np.int8(10),), (5,)) == ((5, 5),)
def test_coerce():
d = da.from_array(np.array([1]), chunks=(1,))
with dask.set_options(get=dask.get):
assert bool(d)
assert int(d)
assert float(d)
assert complex(d)
def test_store():
d = da.ones((4, 4), chunks=(2, 2))
a, b = d + 1, d + 2
at = np.empty(shape=(4, 4))
bt = np.empty(shape=(4, 4))
store([a, b], [at, bt])
assert (at == 2).all()
assert (bt == 3).all()
assert raises(ValueError, lambda: store([a], [at, bt]))
assert raises(ValueError, lambda: store(at, at))
assert raises(ValueError, lambda: store([at, bt], [at, bt]))
def test_to_hdf5():
try:
import h5py
except ImportError:
return
x = da.ones((4, 4), chunks=(2, 2))
y = da.ones(4, chunks=2, dtype='i4')
with tmpfile('.hdf5') as fn:
x.to_hdf5(fn, '/x')
with h5py.File(fn) as f:
d = f['/x']
assert eq(d[:], x)
assert d.chunks == (2, 2)
with tmpfile('.hdf5') as fn:
x.to_hdf5(fn, '/x', chunks=None)
with h5py.File(fn) as f:
d = f['/x']
assert eq(d[:], x)
assert d.chunks is None
with tmpfile('.hdf5') as fn:
x.to_hdf5(fn, '/x', chunks=(1, 1))
with h5py.File(fn) as f:
d = f['/x']
assert eq(d[:], x)
assert d.chunks == (1, 1)
with tmpfile('.hdf5') as fn:
da.to_hdf5(fn, {'/x': x, '/y': y})
with h5py.File(fn) as f:
assert eq(f['/x'][:], x)
assert f['/x'].chunks == (2, 2)
assert eq(f['/y'][:], y)
assert f['/y'].chunks == (2,)
def test_np_array_with_zero_dimensions():
d = da.ones((4, 4), chunks=(2, 2))
assert eq(np.array(d.sum()), np.array(d.compute().sum()))
def test_unique():
x = np.array([1, 2, 4, 4, 5, 2])
d = da.from_array(x, chunks=(3,))
assert eq(da.unique(d), np.unique(x))
def test_dtype_complex():
x = np.arange(24).reshape((4, 6)).astype('f4')
y = np.arange(24).reshape((4, 6)).astype('i8')
z = np.arange(24).reshape((4, 6)).astype('i2')
a = da.from_array(x, chunks=(2, 3))
b = da.from_array(y, chunks=(2, 3))
c = da.from_array(z, chunks=(2, 3))
def eq(a, b):
return (isinstance(a, np.dtype) and
isinstance(b, np.dtype) and
str(a) == str(b))
assert eq(a._dtype, x.dtype)
assert eq(b._dtype, y.dtype)
assert eq((a + 1)._dtype, (x + 1).dtype)
assert eq((a + b)._dtype, (x + y).dtype)
assert eq(a.T._dtype, x.T.dtype)
assert eq(a[:3]._dtype, x[:3].dtype)
assert eq((a.dot(b.T))._dtype, (x.dot(y.T)).dtype)
assert eq(stack([a, b])._dtype, np.vstack([x, y]).dtype)
assert eq(concatenate([a, b])._dtype, np.concatenate([x, y]).dtype)
assert eq(b.std()._dtype, y.std().dtype)
assert eq(c.sum()._dtype, z.sum().dtype)
assert eq(a.min()._dtype, a.min().dtype)
assert eq(b.std()._dtype, b.std().dtype)
assert eq(a.argmin(axis=0)._dtype, a.argmin(axis=0).dtype)
assert eq(da.sin(c)._dtype, np.sin(z).dtype)
assert eq(da.exp(b)._dtype, np.exp(y).dtype)
assert eq(da.floor(a)._dtype, np.floor(x).dtype)
assert eq(da.isnan(b)._dtype, np.isnan(y).dtype)
with ignoring(ImportError):
assert da.isnull(b)._dtype == 'bool'
assert da.notnull(b)._dtype == 'bool'
x = np.array([('a', 1)], dtype=[('text', 'S1'), ('numbers', 'i4')])
d = da.from_array(x, chunks=(1,))
assert eq(d['text']._dtype, x['text'].dtype)
assert eq(d[['numbers', 'text']]._dtype, x[['numbers', 'text']].dtype)
def test_astype():
x = np.ones(5, dtype='f4')
d = da.from_array(x, chunks=(2,))
assert d.astype('i8')._dtype == 'i8'
assert eq(d.astype('i8'), x.astype('i8'))
assert same_keys(d.astype('i8'), d.astype('i8'))
def test_arithmetic():
x = np.arange(5).astype('f4') + 2
y = np.arange(5).astype('i8') + 2
z = np.arange(5).astype('i4') + 2
a = da.from_array(x, chunks=(2,))
b = da.from_array(y, chunks=(2,))
c = da.from_array(z, chunks=(2,))
assert eq(a + b, x + y)
assert eq(a * b, x * y)
assert eq(a - b, x - y)
assert eq(a / b, x / y)
assert eq(b & b, y & y)
assert eq(b | b, y | y)
assert eq(b ^ b, y ^ y)
assert eq(a // b, x // y)
assert eq(a ** b, x ** y)
assert eq(a % b, x % y)
assert eq(a > b, x > y)
assert eq(a < b, x < y)
assert eq(a >= b, x >= y)
assert eq(a <= b, x <= y)
assert eq(a == b, x == y)
assert eq(a != b, x != y)
assert eq(a + 2, x + 2)
assert eq(a * 2, x * 2)
assert eq(a - 2, x - 2)
assert eq(a / 2, x / 2)
assert eq(b & True, y & True)
assert eq(b | True, y | True)
assert eq(b ^ True, y ^ True)
assert eq(a // 2, x // 2)
assert eq(a ** 2, x ** 2)
assert eq(a % 2, x % 2)
assert eq(a > 2, x > 2)
assert eq(a < 2, x < 2)
assert eq(a >= 2, x >= 2)
assert eq(a <= 2, x <= 2)
assert eq(a == 2, x == 2)
assert eq(a != 2, x != 2)
assert eq(2 + b, 2 + y)
assert eq(2 * b, 2 * y)
assert eq(2 - b, 2 - y)
assert eq(2 / b, 2 / y)
assert eq(True & b, True & y)
assert eq(True | b, True | y)
assert eq(True ^ b, True ^ y)
assert eq(2 // b, 2 // y)
assert eq(2 ** b, 2 ** y)
assert eq(2 % b, 2 % y)
assert eq(2 > b, 2 > y)
assert eq(2 < b, 2 < y)
assert eq(2 >= b, 2 >= y)
assert eq(2 <= b, 2 <= y)
assert eq(2 == b, 2 == y)
assert eq(2 != b, 2 != y)
assert eq(-a, -x)
assert eq(abs(a), abs(x))
assert eq(~(a == b), ~(x == y))
assert eq(~(a == b), ~(x == y))
assert eq(da.logaddexp(a, b), np.logaddexp(x, y))
assert eq(da.logaddexp2(a, b), np.logaddexp2(x, y))
assert eq(da.exp(b), np.exp(y))
assert eq(da.log(a), np.log(x))
assert eq(da.log10(a), np.log10(x))
assert eq(da.log1p(a), np.log1p(x))
assert eq(da.expm1(b), np.expm1(y))
assert eq(da.sqrt(a), np.sqrt(x))
assert eq(da.square(a), np.square(x))
assert eq(da.sin(a), np.sin(x))
assert eq(da.cos(b), np.cos(y))
assert eq(da.tan(a), np.tan(x))
assert eq(da.arcsin(b/10), np.arcsin(y/10))
assert eq(da.arccos(b/10), np.arccos(y/10))
assert eq(da.arctan(b/10), np.arctan(y/10))
assert eq(da.arctan2(b*10, a), np.arctan2(y*10, x))
assert eq(da.hypot(b, a), np.hypot(y, x))
assert eq(da.sinh(a), np.sinh(x))
assert eq(da.cosh(b), np.cosh(y))
assert eq(da.tanh(a), np.tanh(x))
assert eq(da.arcsinh(b*10), np.arcsinh(y*10))
assert eq(da.arccosh(b*10), np.arccosh(y*10))
assert eq(da.arctanh(b/10), np.arctanh(y/10))
assert eq(da.deg2rad(a), np.deg2rad(x))
assert eq(da.rad2deg(a), np.rad2deg(x))
assert eq(da.logical_and(a < 1, b < 4), np.logical_and(x < 1, y < 4))
assert eq(da.logical_or(a < 1, b < 4), np.logical_or(x < 1, y < 4))
assert eq(da.logical_xor(a < 1, b < 4), np.logical_xor(x < 1, y < 4))
assert eq(da.logical_not(a < 1), np.logical_not(x < 1))
assert eq(da.maximum(a, 5 - a), np.maximum(a, 5 - a))
assert eq(da.minimum(a, 5 - a), np.minimum(a, 5 - a))
assert eq(da.fmax(a, 5 - a), np.fmax(a, 5 - a))
assert eq(da.fmin(a, 5 - a), np.fmin(a, 5 - a))
assert eq(da.isreal(a + 1j * b), np.isreal(x + 1j * y))
assert eq(da.iscomplex(a + 1j * b), np.iscomplex(x + 1j * y))
assert eq(da.isfinite(a), np.isfinite(x))
assert eq(da.isinf(a), np.isinf(x))
assert eq(da.isnan(a), np.isnan(x))
assert eq(da.signbit(a - 3), np.signbit(x - 3))
assert eq(da.copysign(a - 3, b), np.copysign(x - 3, y))
assert eq(da.nextafter(a - 3, b), np.nextafter(x - 3, y))
assert eq(da.ldexp(c, c), np.ldexp(z, z))
assert eq(da.fmod(a * 12, b), np.fmod(x * 12, y))
assert eq(da.floor(a * 0.5), np.floor(x * 0.5))
assert eq(da.ceil(a), np.ceil(x))
assert eq(da.trunc(a / 2), np.trunc(x / 2))
assert eq(da.degrees(b), np.degrees(y))
assert eq(da.radians(a), np.radians(x))
assert eq(da.rint(a + 0.3), np.rint(x + 0.3))
assert eq(da.fix(a - 2.5), np.fix(x - 2.5))
assert eq(da.angle(a + 1j), np.angle(x + 1j))
assert eq(da.real(a + 1j), np.real(x + 1j))
assert eq((a + 1j).real, np.real(x + 1j))
assert eq(da.imag(a + 1j), np.imag(x + 1j))
assert eq((a + 1j).imag, np.imag(x + 1j))
assert eq(da.conj(a + 1j * b), np.conj(x + 1j * y))
assert eq((a + 1j * b).conj(), (x + 1j * y).conj())
assert eq(da.clip(b, 1, 4), np.clip(y, 1, 4))
assert eq(da.fabs(b), np.fabs(y))
assert eq(da.sign(b - 2), np.sign(y - 2))
l1, l2 = da.frexp(a)
r1, r2 = np.frexp(x)
assert eq(l1, r1)
assert eq(l2, r2)
l1, l2 = da.modf(a)
r1, r2 = np.modf(x)
assert eq(l1, r1)
assert eq(l2, r2)
assert eq(da.around(a, -1), np.around(x, -1))
def test_elemwise_consistent_names():
a = da.from_array(np.arange(5, dtype='f4'), chunks=(2,))
b = da.from_array(np.arange(5, dtype='f4'), chunks=(2,))
assert same_keys(a + b, a + b)
assert same_keys(a + 2, a + 2)
assert same_keys(da.exp(a), da.exp(a))
assert same_keys(da.exp(a, dtype='f8'), da.exp(a, dtype='f8'))
assert same_keys(da.maximum(a, b), da.maximum(a, b))
def test_optimize():
x = np.arange(5).astype('f4')
a = da.from_array(x, chunks=(2,))
expr = a[1:4] + 1
result = optimize(expr.dask, expr._keys())
assert isinstance(result, dict)
assert all(key in result for key in expr._keys())
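# from_array only needs duck typing: any object exposing shape, dtype and a
# slice-based __getitem__ (here returning something with __array__) can back
# a dask array.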
def test_slicing_with_non_ndarrays():
class ARangeSlice(object):
def __init__(self, start, stop):
self.start = start
self.stop = stop
def __array__(self):
return np.arange(self.start, self.stop)
class ARangeSlicable(object):
dtype = 'i8'
def __init__(self, n):
self.n = n
@property
def shape(self):
return (self.n,)
def __getitem__(self, key):
return ARangeSlice(key[0].start, key[0].stop)
x = da.from_array(ARangeSlicable(10), chunks=(4,))
assert eq((x + 1).sum(), (np.arange(10, dtype=x.dtype) + 1).sum())
def test_getarray():
assert type(getarray(np.matrix([[1]]), 0)) == np.ndarray
assert eq(getarray([1, 2, 3, 4, 5], slice(1, 4)), np.array([2, 3, 4]))
assert eq(getarray(np.arange(5), (None, slice(None, None))),
np.arange(5)[None, :])
def test_squeeze():
x = da.ones((10, 1), chunks=(3, 1))
assert eq(x.squeeze(), x.compute().squeeze())
assert x.squeeze().chunks == ((3, 3, 3, 1),)
assert same_keys(x.squeeze(), x.squeeze())
def test_size():
x = da.ones((10, 2), chunks=(3, 1))
assert x.size == np.array(x).size
def test_nbytes():
x = da.ones((10, 2), chunks=(3, 1))
assert x.nbytes == np.array(x).nbytes
def test_Array_normalizes_dtype():
x = da.ones((3,), chunks=(1,), dtype=int)
assert isinstance(x.dtype, np.dtype)
def test_args():
x = da.ones((10, 2), chunks=(3, 1), dtype='i4') + 1
y = Array(*x._args)
assert eq(x, y)
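# lock=True should embed a single shared lock object into every getter task,
# and an explicit Lock instance can be shared across several arrays.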
def test_from_array_with_lock():
x = np.arange(10)
d = da.from_array(x, chunks=5, lock=True)
tasks = [v for k, v in d.dask.items() if k[0] == d.name]
assert isinstance(tasks[0][3], type(Lock()))
assert len(set(task[3] for task in tasks)) == 1
assert eq(d, x)
lock = Lock()
e = da.from_array(x, chunks=5, lock=lock)
f = da.from_array(x, chunks=5, lock=lock)
assert eq(e + f, x + x)
def test_from_func():
x = np.arange(10)
f = lambda n: n * x
d = from_func(f, (10,), x.dtype, kwargs={'n': 2})
assert d.shape == x.shape
assert d.dtype == x.dtype
assert eq(d.compute(), 2 * x)
assert same_keys(d, from_func(f, (10,), x.dtype, kwargs={'n': 2}))
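# topk returns the k largest entries in descending order, collapsed into a
# single chunk.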
def test_topk():
x = np.array([5, 2, 1, 6])
d = da.from_array(x, chunks=2)
e = da.topk(2, d)
assert e.chunks == ((2,),)
assert eq(e, np.sort(x)[-1:-3:-1])
assert same_keys(da.topk(2, d), e)
def test_topk_k_bigger_than_chunk():
x = np.array([5, 2, 1, 6])
d = da.from_array(x, chunks=2)
e = da.topk(3, d)
assert e.chunks == ((3,),)
assert eq(e, np.array([6, 5, 2]))
def test_bincount():
x = np.array([2, 1, 5, 2, 1])
d = da.from_array(x, chunks=2)
e = da.bincount(d, minlength=6)
assert eq(e, np.bincount(x, minlength=6))
assert same_keys(da.bincount(d, minlength=6), e)
def test_bincount_with_weights():
x = np.array([2, 1, 5, 2, 1])
d = da.from_array(x, chunks=2)
weights = np.array([1, 2, 1, 0.5, 1])
dweights = da.from_array(weights, chunks=2)
e = da.bincount(d, weights=dweights, minlength=6)
assert eq(e, np.bincount(x, weights=dweights, minlength=6))
assert same_keys(da.bincount(d, weights=dweights, minlength=6), e)
def test_bincount_raises_informative_error_on_missing_minlength_kwarg():
x = np.array([2, 1, 5, 2, 1])
d = da.from_array(x, chunks=2)
try:
da.bincount(d)
except Exception as e:
assert 'minlength' in str(e)
else:
assert False
def test_histogram():
# Test for normal, flattened input
n = 100
v = da.random.random(n, chunks=10)
bins = np.arange(0, 1.01, 0.01)
(a1, b1) = da.histogram(v, bins=bins)
(a2, b2) = np.histogram(v, bins=bins)
# Check if the sum of the bins equals the number of samples
assert a2.sum(axis=0) == n
assert a1.sum(axis=0) == n
assert eq(a1, a2)
assert same_keys(da.histogram(v, bins=bins)[0], a1)
def test_histogram_alternative_bins_range():
v = da.random.random(100, chunks=10)
bins = np.arange(0, 1.01, 0.01)
# Other input
(a1, b1) = da.histogram(v, bins=10, range=(0, 1))
(a2, b2) = np.histogram(v, bins=10, range=(0, 1))
assert eq(a1, a2)
assert eq(b1, b2)
def test_histogram_return_type():
v = da.random.random(100, chunks=10)
bins = np.arange(0, 1.01, 0.01)
# Check if return type is same as hist
bins = np.arange(0, 11, 1, dtype='i4')
assert eq(da.histogram(v * 10, bins=bins)[0],
np.histogram(v * 10, bins=bins)[0])
def test_histogram_extra_args_and_shapes():
# Check for extra args and shapes
bins = np.arange(0, 1.01, 0.01)
v = da.random.random(100, chunks=10)
data = [(v, bins, da.ones(100, chunks=v.chunks) * 5),
(da.random.random((50, 50), chunks=10), bins, da.ones((50, 50), chunks=10) * 5)]
for v, bins, w in data:
# normed (deprecated alias of density)
assert eq(da.histogram(v, bins=bins, normed=True)[0],
np.histogram(v, bins=bins, normed=True)[0])
# density
assert eq(da.histogram(v, bins=bins, density=True)[0],
np.histogram(v, bins=bins, density=True)[0])
# weights
assert eq(da.histogram(v, bins=bins, weights=w)[0],
np.histogram(v, bins=bins, weights=w)[0])
assert eq(da.histogram(v, bins=bins, weights=w, density=True)[0],
np.histogram(v, bins=bins, weights=w, density=True)[0])
def test_concatenate3():
x = np.array([1, 2])
assert eq(concatenate3([x, x, x]),
np.array([1, 2, 1, 2, 1, 2]))
x = np.array([[1, 2]])
assert (concatenate3([[x, x, x], [x, x, x]]) ==
np.array([[1, 2, 1, 2, 1, 2],
[1, 2, 1, 2, 1, 2]])).all()
assert (concatenate3([[x, x], [x, x], [x, x]]) ==
np.array([[1, 2, 1, 2],
[1, 2, 1, 2],
[1, 2, 1, 2]])).all()
x = np.arange(12).reshape((2, 2, 3))
assert eq(concatenate3([[[x, x, x],
[x, x, x]],
[[x, x, x],
[x, x, x]]]),
np.array([[[ 0, 1, 2, 0, 1, 2, 0, 1, 2],
[ 3, 4, 5, 3, 4, 5, 3, 4, 5],
[ 0, 1, 2, 0, 1, 2, 0, 1, 2],
[ 3, 4, 5, 3, 4, 5, 3, 4, 5]],
[[ 6, 7, 8, 6, 7, 8, 6, 7, 8],
[ 9, 10, 11, 9, 10, 11, 9, 10, 11],
[ 6, 7, 8, 6, 7, 8, 6, 7, 8],
[ 9, 10, 11, 9, 10, 11, 9, 10, 11]],
[[ 0, 1, 2, 0, 1, 2, 0, 1, 2],
[ 3, 4, 5, 3, 4, 5, 3, 4, 5],
[ 0, 1, 2, 0, 1, 2, 0, 1, 2],
[ 3, 4, 5, 3, 4, 5, 3, 4, 5]],
[[ 6, 7, 8, 6, 7, 8, 6, 7, 8],
[ 9, 10, 11, 9, 10, 11, 9, 10, 11],
[ 6, 7, 8, 6, 7, 8, 6, 7, 8],
[ 9, 10, 11, 9, 10, 11, 9, 10, 11]]]))
def test_map_blocks3():
x = np.arange(10)
y = np.arange(10) * 2
d = da.from_array(x, chunks=5)
e = da.from_array(y, chunks=5)
assert eq(da.core.map_blocks(lambda a, b: a+2*b, d, e, dtype=d.dtype),
x + 2*y)
z = np.arange(100).reshape((10, 10))
f = da.from_array(z, chunks=5)
func = lambda a, b: a + 2*b
res = da.core.map_blocks(func, d, f, dtype=d.dtype)
assert eq(res, x + 2*z)
assert same_keys(da.core.map_blocks(func, d, f, dtype=d.dtype), res)
assert eq(da.map_blocks(func, f, d, dtype=d.dtype),
z + 2*x)
def test_from_array_with_missing_chunks():
x = np.random.randn(2, 4, 3)
d = da.from_array(x, chunks=(None, 2, None))
assert d.chunks == da.from_array(x, chunks=(2, 2, 3)).chunks
def test_cache():
x = da.arange(15, chunks=5)
y = 2 * x + 1
z = y.cache()
assert len(z.dask) == 3 # very short graph
assert eq(y, z)
cache = np.empty(15, dtype=y.dtype)
z = y.cache(store=cache)
assert len(z.dask) < 6 # very short graph
assert z.chunks == y.chunks
assert eq(y, z)
def test_take_dask_from_numpy():
x = np.arange(5).astype('f8')
y = da.from_array(np.array([1, 2, 3, 3, 2, 1]), chunks=3)
z = da.take(x * 2, y)
assert z.chunks == y.chunks
assert eq(z, np.array([2., 4., 6., 6., 4., 2.]))
def test_normalize_chunks():
assert normalize_chunks(3, (4, 6)) == ((3, 1), (3, 3))
def test_raise_on_no_chunks():
x = da.ones(6, chunks=3)
try:
Array(x.dask, x.name, chunks=None, dtype=x.dtype, shape=None)
assert False
except ValueError as e:
assert "dask.pydata.org" in str(e)
assert raises(ValueError, lambda: da.ones(6))
def test_chunks_is_immutable():
x = da.ones(6, chunks=3)
try:
x.chunks = 2
assert False
except TypeError as e:
assert 'rechunk(2)' in str(e)
def test_raise_on_bad_kwargs():
x = da.ones(5, chunks=3)
try:
da.minimum(x, out=None)
except TypeError as e:
assert 'minimum' in str(e)
assert 'out' in str(e)
def test_long_slice():
x = np.arange(10000)
d = da.from_array(x, chunks=1)
assert eq(d[8000:8200], x[8000:8200])
def test_h5py_newaxis():
try:
import h5py
except ImportError:
return
with tmpfile('h5') as fn:
with h5py.File(fn) as f:
x = f.create_dataset('/x', shape=(10, 10), dtype='f8')
d = da.from_array(x, chunks=(5, 5))
assert d[None, :, :].compute(get=get_sync).shape == (1, 10, 10)
assert d[:, None, :].compute(get=get_sync).shape == (10, 1, 10)
assert d[:, :, None].compute(get=get_sync).shape == (10, 10, 1)
assert same_keys(d[:, :, None], d[:, :, None])
def test_ellipsis_slicing():
assert eq(da.ones(4, chunks=2)[...], np.ones(4))
def test_point_slicing():
x = np.arange(56).reshape((7, 8))
d = da.from_array(x, chunks=(3, 4))
result = d.vindex[[1, 2, 5, 5], [3, 1, 6, 1]]
assert eq(result, x[[1, 2, 5, 5], [3, 1, 6, 1]])
result = d.vindex[[0, 1, 6, 0], [0, 1, 0, 7]]
assert eq(result, x[[0, 1, 6, 0], [0, 1, 0, 7]])
assert same_keys(result, d.vindex[[0, 1, 6, 0], [0, 1, 0, 7]])
def test_point_slicing_with_full_slice():
from dask.array.core import _vindex_transpose, _get_axis
x = np.arange(4*5*6*7).reshape((4, 5, 6, 7))
d = da.from_array(x, chunks=(2, 3, 3, 4))
inds = [
[[1, 2, 3], None, [3, 2, 1], [5, 3, 4]],
[[1, 2, 3], None, [4, 3, 2], None],
[[1, 2, 3], [3, 2, 1]],
[[1, 2, 3], [3, 2, 1], [3, 2, 1], [5, 3, 4]],
[[], [], [], None],
[np.array([1, 2, 3]), None, np.array([4, 3, 2]), None],
[None, None, [1, 2, 3], [4, 3, 2]],
[None, [0, 2, 3], None, [0, 3, 2]],
]
for ind in inds:
slc = [i if isinstance(i, (np.ndarray, list)) else slice(None, None)
for i in ind]
result = d.vindex[tuple(slc)]
# Rotate the expected result accordingly
axis = _get_axis(ind)
expected = _vindex_transpose(x[tuple(slc)], axis)
assert eq(result, expected)
# Always have the first axis be the length of the points
k = len(next(i for i in ind if isinstance(i, (np.ndarray, list))))
assert result.shape[0] == k
def test_slice_with_floats():
d = da.ones((5,), chunks=(3,))
with pytest.raises(IndexError):
d[1.5]
with pytest.raises(IndexError):
d[0:1.5]
with pytest.raises(IndexError):
d[[1, 1.5]]
def test_vindex_errors():
d = da.ones((5, 5, 5), chunks=(3, 3, 3))
assert raises(IndexError, lambda: d.vindex[0])
assert raises(IndexError, lambda: d.vindex[[1, 2, 3]])
assert raises(IndexError, lambda: d.vindex[[1, 2, 3], [1, 2, 3], 0])
assert raises(IndexError, lambda: d.vindex[[1], [1, 2, 3]])
assert raises(IndexError, lambda: d.vindex[[1, 2, 3], [[1], [2], [3]]])
def test_vindex_merge():
from dask.array.core import _vindex_merge
locations = [1], [2, 0]
values = [np.array([[1, 2, 3]]),
np.array([[10, 20, 30], [40, 50, 60]])]
assert (_vindex_merge(locations, values) == np.array([[40, 50, 60],
[1, 2, 3],
[10, 20, 30]])).all()
def test_empty_array():
assert eq(np.arange(0), da.arange(0, chunks=5))
def test_array():
x = np.ones(5, dtype='i4')
d = da.ones(5, chunks=3, dtype='i4')
assert eq(da.array(d, ndmin=3, dtype='i8'),
np.array(x, ndmin=3, dtype='i8'))
def test_cov():
x = np.arange(56).reshape((7, 8))
d = da.from_array(x, chunks=(4, 4))
assert eq(da.cov(d), np.cov(x))
assert eq(da.cov(d, rowvar=0), np.cov(x, rowvar=0))
assert eq(da.cov(d, ddof=10), np.cov(x, ddof=10))
assert eq(da.cov(d, bias=1), np.cov(x, bias=1))
assert eq(da.cov(d, d), np.cov(x, x))
y = np.arange(8)
e = da.from_array(y, chunks=(4,))
assert eq(da.cov(d, e), np.cov(x, y))
assert eq(da.cov(e, d), np.cov(y, x))
assert raises(ValueError, lambda: da.cov(d, ddof=1.5))
def test_memmap():
with tmpfile('npy') as fn_1:
with tmpfile('npy') as fn_2:
try:
x = da.arange(100, chunks=15)
target = np.memmap(fn_1, shape=x.shape, mode='w+', dtype=x.dtype)
x.store(target)
assert eq(target, x)
np.save(fn_2, target)
assert eq(np.load(fn_2, mmap_mode='r'), x)
finally:
target._mmap.close()
def test_to_npy_stack():
x = np.arange(5*10*10).reshape((5, 10, 10))
d = da.from_array(x, chunks=(2, 4, 4))
dirname = mkdtemp()
try:
da.to_npy_stack(dirname, d, axis=0)
assert os.path.exists(os.path.join(dirname, '0.npy'))
assert (np.load(os.path.join(dirname, '1.npy')) == x[2:4]).all()
e = da.from_npy_stack(dirname)
assert eq(d, e)
finally:
shutil.rmtree(dirname)
def test_view():
x = np.arange(56).reshape((7, 8))
d = da.from_array(x, chunks=(2, 3))
assert eq(x.view('i4'), d.view('i4'))
assert eq(x.view('i2'), d.view('i2'))
assert all(isinstance(s, int) for s in d.shape)
x = np.arange(8, dtype='i1')
d = da.from_array(x, chunks=(4,))
assert eq(x.view('i4'), d.view('i4'))
with pytest.raises(ValueError):
x = np.arange(8, dtype='i1')
d = da.from_array(x, chunks=(3,))
d.view('i4')
with pytest.raises(ValueError):
d.view('i4', order='asdf')
def test_view_fortran():
x = np.asfortranarray(np.arange(64).reshape((8, 8)))
d = da.from_array(x, chunks=(2, 3))
assert eq(x.view('i4'), d.view('i4', order='F'))
assert eq(x.view('i2'), d.view('i2', order='F'))
def test_h5py_tokenize():
h5py = pytest.importorskip('h5py')
with tmpfile('hdf5') as fn1:
with tmpfile('hdf5') as fn2:
f = h5py.File(fn1)
g = h5py.File(fn2)
f['x'] = np.arange(10).astype(float)
g['x'] = np.ones(10).astype(float)
x1 = f['x']
x2 = g['x']
assert tokenize(x1) != tokenize(x2)
def test_map_blocks_with_changed_dimension():
x = np.arange(56).reshape((7, 8))
d = da.from_array(x, chunks=(7, 4))
e = d.map_blocks(lambda b: b.sum(axis=0), chunks=(4,), drop_axis=0,
dtype=d.dtype)
assert e.ndim == 1
assert e.chunks == ((4, 4),)
assert eq(e, x.sum(axis=0))
x = np.arange(64).reshape((8, 8))
d = da.from_array(x, chunks=(4, 4))
e = d.map_blocks(lambda b: b[None, :, :, None],
chunks=(1, 4, 4, 1), new_axis=[0, 3], dtype=d.dtype)
assert e.ndim == 4
assert e.chunks == ((1,), (4, 4), (4, 4), (1,))
assert eq(e, x[None, :, :, None])
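# broadcast_chunks merges per-axis chunk tuples under numpy broadcasting
# rules; singleton (length-1) axes broadcast, incompatible chunkings raise.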
def test_broadcast_chunks():
assert broadcast_chunks(((5, 5),), ((5, 5),)) == ((5, 5),)
a = ((10, 10, 10), (5, 5),)
b = ((5, 5),)
assert broadcast_chunks(a, b) == ((10, 10, 10), (5, 5),)
assert broadcast_chunks(b, a) == ((10, 10, 10), (5, 5),)
a = ((10, 10, 10), (5, 5),)
b = ((1,), (5, 5),)
assert broadcast_chunks(a, b) == ((10, 10, 10), (5, 5),)
a = ((10, 10, 10), (5, 5),)
b = ((3, 3,), (5, 5),)
with pytest.raises(ValueError):
broadcast_chunks(a, b)
def test_chunks_error():
x = np.ones((10, 10))
with pytest.raises(ValueError):
da.from_array(x, chunks=(5,))
def test_array_compute_forward_kwargs():
x = da.arange(10, chunks=2).sum()
x.compute(bogus_keyword=10)
| {
"repo_name": "vikhyat/dask",
"path": "dask/array/tests/test_array_core.py",
"copies": "1",
"size": "53268",
"license": "bsd-3-clause",
"hash": -5817916251116970000,
"line_mean": 29.5085910653,
"line_max": 92,
"alpha_frac": 0.5073214688,
"autogenerated": false,
"ratio": 2.707395171537484,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8709700798651009,
"avg_score": 0.001003168337295115,
"num_lines": 1746
} |
from __future__ import absolute_import, division, print_function
import pytest
pytest.importorskip('numpy')
from operator import add, sub
import os
import shutil
import time
from toolz import merge, countby
from toolz.curried import identity
import dask
import dask.array as da
from dask.delayed import delayed
from dask.async import get_sync
from dask.array.core import *
from dask.utils import raises, ignoring, tmpfile, tmpdir
from dask.array.utils import assert_eq
# temporary until numpy functions migrated
try:
from numpy import nancumsum, nancumprod
except ImportError: # pragma: no cover
import dask.array.numpy_compat as npcompat
nancumsum = npcompat.nancumsum
nancumprod = npcompat.nancumprod
def inc(x):
return x + 1
def same_keys(a, b):
def key(k):
if isinstance(k, str):
return (k, -1, -1, -1)
else:
return k
return sorted(a.dask, key=key) == sorted(b.dask, key=key)
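# getem builds the blocked "getter" graph: one task per chunk, keyed by
# (name, i, j) and slicing the source array with the matching slices.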
def test_getem():
assert getem('X', (2, 3), shape=(4, 6)) == \
{('X', 0, 0): (getarray, 'X', (slice(0, 2), slice(0, 3))),
('X', 1, 0): (getarray, 'X', (slice(2, 4), slice(0, 3))),
('X', 1, 1): (getarray, 'X', (slice(2, 4), slice(3, 6))),
('X', 0, 1): (getarray, 'X', (slice(0, 2), slice(3, 6)))}
def test_top():
assert top(inc, 'z', 'ij', 'x', 'ij', numblocks={'x': (2, 2)}) == \
{('z', 0, 0): (inc, ('x', 0, 0)),
('z', 0, 1): (inc, ('x', 0, 1)),
('z', 1, 0): (inc, ('x', 1, 0)),
('z', 1, 1): (inc, ('x', 1, 1))}
assert top(add, 'z', 'ij', 'x', 'ij', 'y', 'ij',
numblocks={'x': (2, 2), 'y': (2, 2)}) == \
{('z', 0, 0): (add, ('x', 0, 0), ('y', 0, 0)),
('z', 0, 1): (add, ('x', 0, 1), ('y', 0, 1)),
('z', 1, 0): (add, ('x', 1, 0), ('y', 1, 0)),
('z', 1, 1): (add, ('x', 1, 1), ('y', 1, 1))}
assert top(dotmany, 'z', 'ik', 'x', 'ij', 'y', 'jk',
numblocks={'x': (2, 2), 'y': (2, 2)}) == \
{('z', 0, 0): (dotmany, [('x', 0, 0), ('x', 0, 1)],
[('y', 0, 0), ('y', 1, 0)]),
('z', 0, 1): (dotmany, [('x', 0, 0), ('x', 0, 1)],
[('y', 0, 1), ('y', 1, 1)]),
('z', 1, 0): (dotmany, [('x', 1, 0), ('x', 1, 1)],
[('y', 0, 0), ('y', 1, 0)]),
('z', 1, 1): (dotmany, [('x', 1, 0), ('x', 1, 1)],
[('y', 0, 1), ('y', 1, 1)])}
assert top(identity, 'z', '', 'x', 'ij', numblocks={'x': (2, 2)}) ==\
{('z',): (identity, [[('x', 0, 0), ('x', 0, 1)],
[('x', 1, 0), ('x', 1, 1)]])}
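# top should also apply numpy-style broadcasting: an axis with a single block
# is reused against every block of the other operand.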
def test_top_supports_broadcasting_rules():
assert top(add, 'z', 'ij', 'x', 'ij', 'y', 'ij',
numblocks={'x': (1, 2), 'y': (2, 1)}) == \
{('z', 0, 0): (add, ('x', 0, 0), ('y', 0, 0)),
('z', 0, 1): (add, ('x', 0, 1), ('y', 0, 0)),
('z', 1, 0): (add, ('x', 0, 0), ('y', 1, 0)),
('z', 1, 1): (add, ('x', 0, 1), ('y', 1, 0))}
def test_concatenate3_on_scalars():
assert_eq(concatenate3([1, 2]), np.array([1, 2]))
def test_chunked_dot_product():
x = np.arange(400).reshape((20, 20))
o = np.ones((20, 20))
d = {'x': x, 'o': o}
getx = getem('x', (5, 5), shape=(20, 20))
geto = getem('o', (5, 5), shape=(20, 20))
result = top(dotmany, 'out', 'ik', 'x', 'ij', 'o', 'jk',
numblocks={'x': (4, 4), 'o': (4, 4)})
dsk = merge(d, getx, geto, result)
out = dask.get(dsk, [[('out', i, j) for j in range(4)] for i in range(4)])
assert_eq(np.dot(x, o), concatenate3(out))
def test_chunked_transpose_plus_one():
x = np.arange(400).reshape((20, 20))
d = {'x': x}
getx = getem('x', (5, 5), shape=(20, 20))
f = lambda x: x.T + 1
comp = top(f, 'out', 'ij', 'x', 'ji', numblocks={'x': (4, 4)})
dsk = merge(d, getx, comp)
out = dask.get(dsk, [[('out', i, j) for j in range(4)] for i in range(4)])
assert_eq(concatenate3(out), x.T + 1)
def test_transpose():
x = np.arange(240).reshape((4, 6, 10))
d = da.from_array(x, (2, 3, 4))
assert_eq(d.transpose((2, 0, 1)),
x.transpose((2, 0, 1)))
assert same_keys(d.transpose((2, 0, 1)), d.transpose((2, 0, 1)))
def test_broadcast_dimensions_works_with_singleton_dimensions():
argpairs = [('x', 'i')]
numblocks = {'x': ((1,),)}
assert broadcast_dimensions(argpairs, numblocks) == {'i': (1,)}
def test_broadcast_dimensions():
argpairs = [('x', 'ij'), ('y', 'ij')]
d = {'x': ('Hello', 1), 'y': (1, (2, 3))}
assert broadcast_dimensions(argpairs, d) == {'i': 'Hello', 'j': (2, 3)}
def test_Array():
shape = (1000, 1000)
chunks = (100, 100)
name = 'x'
dsk = merge({name: 'some-array'}, getem(name, chunks, shape=shape))
a = Array(dsk, name, chunks, shape=shape)
assert a.numblocks == (10, 10)
assert a._keys() == [[('x', i, j) for j in range(10)]
for i in range(10)]
assert a.chunks == ((100,) * 10, (100,) * 10)
assert a.shape == shape
assert len(a) == shape[0]
def test_uneven_chunks():
a = Array({}, 'x', chunks=(3, 3), shape=(10, 10))
assert a.chunks == ((3, 3, 3, 1), (3, 3, 3, 1))
def test_numblocks_suppoorts_singleton_block_dims():
shape = (100, 10)
chunks = (10, 10)
name = 'x'
dsk = merge({name: 'some-array'}, getem(name, shape=shape, chunks=chunks))
a = Array(dsk, name, chunks, shape=shape)
assert set(concat(a._keys())) == set([('x', i, 0) for i in range(100//10)])
def test_keys():
dsk = dict((('x', i, j), ()) for i in range(5) for j in range(6))
dx = Array(dsk, 'x', chunks=(10, 10), shape=(50, 60))
assert dx._keys() == [[(dx.name, i, j) for j in range(6)]
for i in range(5)]
d = Array({}, 'x', (), shape=())
assert d._keys() == [('x',)]
def test_Array_computation():
a = Array({('x', 0, 0): np.eye(3)}, 'x', shape=(3, 3), chunks=(3, 3))
assert_eq(np.array(a), np.eye(3))
assert isinstance(a.compute(), np.ndarray)
assert float(a[0, 0]) == 1
def test_stack():
a, b, c = [Array(getem(name, chunks=(2, 3), shape=(4, 6)),
name, shape=(4, 6), chunks=(2, 3))
for name in 'ABC']
s = stack([a, b, c], axis=0)
colon = slice(None, None, None)
assert s.shape == (3, 4, 6)
assert s.chunks == ((1, 1, 1), (2, 2), (3, 3))
assert s.dask[(s.name, 0, 1, 0)] == (getitem, ('A', 1, 0),
(None, colon, colon))
assert s.dask[(s.name, 2, 1, 0)] == (getitem, ('C', 1, 0),
(None, colon, colon))
assert same_keys(s, stack([a, b, c], axis=0))
s2 = stack([a, b, c], axis=1)
assert s2.shape == (4, 3, 6)
assert s2.chunks == ((2, 2), (1, 1, 1), (3, 3))
assert s2.dask[(s2.name, 0, 1, 0)] == (getitem, ('B', 0, 0),
(colon, None, colon))
assert s2.dask[(s2.name, 1, 1, 0)] == (getitem, ('B', 1, 0),
(colon, None, colon))
assert same_keys(s2, stack([a, b, c], axis=1))
s2 = stack([a, b, c], axis=2)
assert s2.shape == (4, 6, 3)
assert s2.chunks == ((2, 2), (3, 3), (1, 1, 1))
assert s2.dask[(s2.name, 0, 1, 0)] == (getitem, ('A', 0, 1),
(colon, colon, None))
assert s2.dask[(s2.name, 1, 1, 2)] == (getitem, ('C', 1, 1),
(colon, colon, None))
assert same_keys(s2, stack([a, b, c], axis=2))
assert raises(ValueError, lambda: stack([a, b, c], axis=3))
assert set(b.dask.keys()).issubset(s2.dask.keys())
assert stack([a, b, c], axis=-1).chunks == \
stack([a, b, c], axis=2).chunks
def test_short_stack():
x = np.array([1])
d = da.from_array(x, chunks=(1,))
s = da.stack([d])
assert s.shape == (1, 1)
assert Array._get(s.dask, s._keys())[0][0].shape == (1, 1)
def test_stack_scalars():
d = da.arange(4, chunks=2)
s = da.stack([d.mean(), d.sum()])
assert s.compute().tolist() == [np.arange(4).mean(), np.arange(4).sum()]
def test_concatenate():
a, b, c = [Array(getem(name, chunks=(2, 3), shape=(4, 6)),
name, shape=(4, 6), chunks=(2, 3))
for name in 'ABC']
x = concatenate([a, b, c], axis=0)
assert x.shape == (12, 6)
assert x.chunks == ((2, 2, 2, 2, 2, 2), (3, 3))
assert x.dask[(x.name, 0, 1)] == ('A', 0, 1)
assert x.dask[(x.name, 5, 0)] == ('C', 1, 0)
assert same_keys(x, concatenate([a, b, c], axis=0))
y = concatenate([a, b, c], axis=1)
assert y.shape == (4, 18)
assert y.chunks == ((2, 2), (3, 3, 3, 3, 3, 3))
assert y.dask[(y.name, 1, 0)] == ('A', 1, 0)
assert y.dask[(y.name, 1, 5)] == ('C', 1, 1)
assert same_keys(y, concatenate([a, b, c], axis=1))
assert set(b.dask.keys()).issubset(y.dask.keys())
assert concatenate([a, b, c], axis=-1).chunks == \
concatenate([a, b, c], axis=1).chunks
assert raises(ValueError, lambda: concatenate([a, b, c], axis=2))
def test_concatenate_fixlen_strings():
x = np.array(['a', 'b', 'c'])
y = np.array(['aa', 'bb', 'cc'])
a = da.from_array(x, chunks=(2,))
b = da.from_array(y, chunks=(2,))
assert_eq(np.concatenate([x, y]),
da.concatenate([a, b]))
def test_vstack():
x = np.arange(5)
y = np.ones(5)
a = da.arange(5, chunks=2)
b = da.ones(5, chunks=2)
assert_eq(np.vstack((x, y)), da.vstack((a, b)))
assert_eq(np.vstack((x, y[None, :])), da.vstack((a, b[None, :])))
def test_hstack():
x = np.arange(5)
y = np.ones(5)
a = da.arange(5, chunks=2)
b = da.ones(5, chunks=2)
assert_eq(np.hstack((x[None, :], y[None, :])),
da.hstack((a[None, :], b[None, :])))
assert_eq(np.hstack((x, y)), da.hstack((a, b)))
def test_dstack():
x = np.arange(5)
y = np.ones(5)
a = da.arange(5, chunks=2)
b = da.ones(5, chunks=2)
assert_eq(np.dstack((x[None, None, :], y[None, None, :])),
da.dstack((a[None, None, :], b[None, None, :])))
assert_eq(np.dstack((x[None, :], y[None, :])),
da.dstack((a[None, :], b[None, :])))
assert_eq(np.dstack((x, y)), da.dstack((a, b)))
def test_take():
x = np.arange(400).reshape((20, 20))
a = from_array(x, chunks=(5, 5))
assert_eq(np.take(x, 3, axis=0), take(a, 3, axis=0))
assert_eq(np.take(x, [3, 4, 5], axis=-1), take(a, [3, 4, 5], axis=-1))
assert raises(ValueError, lambda: take(a, 3, axis=2))
assert same_keys(take(a, [3, 4, 5], axis=-1), take(a, [3, 4, 5], axis=-1))
def test_compress():
x = np.arange(25).reshape((5, 5))
a = from_array(x, chunks=(2, 2))
assert_eq(np.compress([True, False, True, False, True], x, axis=0),
da.compress([True, False, True, False, True], a, axis=0))
assert_eq(np.compress([True, False, True, False, True], x, axis=1),
da.compress([True, False, True, False, True], a, axis=1))
assert_eq(np.compress([True, False], x, axis=1),
da.compress([True, False], a, axis=1))
with pytest.raises(NotImplementedError):
da.compress([True, False], a)
with pytest.raises(ValueError):
da.compress([True, False], a, axis=100)
with pytest.raises(ValueError):
da.compress([[True], [False]], a, axis=100)
def test_binops():
a = Array(dict((('a', i), np.array([''])) for i in range(3)),
'a', chunks=((1, 1, 1),))
b = Array(dict((('b', i), np.array([''])) for i in range(3)),
'b', chunks=((1, 1, 1),))
result = elemwise(add, a, b, name='c')
assert result.dask == merge(a.dask, b.dask,
dict((('c', i), (add, ('a', i), ('b', i)))
for i in range(3)))
result = elemwise(pow, a, 2, name='c')
assert "'a', 0" in str(result.dask[('c', 0)])
assert "2" in str(result.dask[('c', 0)])
def test_isnull():
x = np.array([1, np.nan])
a = from_array(x, chunks=(2,))
with ignoring(ImportError):
assert_eq(isnull(a), np.isnan(x))
assert_eq(notnull(a), ~np.isnan(x))
def test_isclose():
x = np.array([0, np.nan, 1, 1.5])
y = np.array([1e-9, np.nan, 1, 2])
a = from_array(x, chunks=(2,))
b = from_array(y, chunks=(2,))
assert_eq(da.isclose(a, b, equal_nan=True),
np.isclose(x, y, equal_nan=True))
def test_broadcast_shapes():
assert (3, 4, 5) == broadcast_shapes((3, 4, 5), (4, 1), ())
assert (3, 4) == broadcast_shapes((3, 1), (1, 4), (4,))
assert (5, 6, 7, 3, 4) == broadcast_shapes((3, 1), (), (5, 6, 7, 1, 4))
assert raises(ValueError, lambda: broadcast_shapes((3,), (3, 4)))
assert raises(ValueError, lambda: broadcast_shapes((2, 3), (2, 3, 1)))
def test_elemwise_on_scalars():
x = np.arange(10)
a = from_array(x, chunks=(5,))
assert len(a._keys()) == 2
assert_eq(a.sum()**2, x.sum()**2)
x = np.arange(11)
a = from_array(x, chunks=(5,))
assert len(a._keys()) == 3
assert_eq(a, x)
def test_partial_by_order():
assert partial_by_order(5, function=add, other=[(1, 20)]) == 25
def test_elemwise_with_ndarrays():
x = np.arange(3)
y = np.arange(12).reshape(4, 3)
a = from_array(x, chunks=(3,))
b = from_array(y, chunks=(2, 3))
assert_eq(x + a, 2 * x)
assert_eq(a + x, 2 * x)
assert_eq(x + b, x + y)
assert_eq(b + x, x + y)
assert_eq(a + y, x + y)
assert_eq(y + a, x + y)
# Error on shape mismatch
assert raises(ValueError, lambda: a + y.T)
assert raises(ValueError, lambda: a + np.arange(2))
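# Binary elemwise operations must also work when the operands use different
# (but compatible) chunkings of the same shape.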
def test_elemwise_differently_chunked():
x = np.arange(3)
y = np.arange(12).reshape(4, 3)
a = from_array(x, chunks=(3,))
b = from_array(y, chunks=(2, 2))
assert_eq(a + b, x + y)
assert_eq(b + a, x + y)
def test_operators():
x = np.arange(10)
y = np.arange(10).reshape((10, 1))
a = from_array(x, chunks=(5,))
b = from_array(y, chunks=(5, 1))
c = a + 1
assert_eq(c, x + 1)
c = a + b
assert_eq(c, x + x.reshape((10, 1)))
expr = (3 / a * b)**2 > 5
assert_eq(expr, (3 / x * y)**2 > 5)
c = exp(a)
assert_eq(c, np.exp(x))
assert_eq(abs(-a), a)
assert_eq(a, +x)
def test_operator_dtype_promotion():
x = np.arange(10, dtype=np.float32)
y = np.array([1])
a = from_array(x, chunks=(5,))
assert_eq(x + 1, a + 1) # still float32
assert_eq(x + 1e50, a + 1e50) # now float64
assert_eq(x + y, a + y) # also float64
def test_field_access():
x = np.array([(1, 1.0), (2, 2.0)], dtype=[('a', 'i4'), ('b', 'f4')])
y = from_array(x, chunks=(1,))
assert_eq(y['a'], x['a'])
assert_eq(y[['b', 'a']], x[['b', 'a']])
assert same_keys(y[['b', 'a']], y[['b', 'a']])
def test_tensordot():
x = np.arange(400).reshape((20, 20))
a = from_array(x, chunks=(5, 4))
y = np.arange(200).reshape((20, 10))
b = from_array(y, chunks=(4, 5))
for axes in [1, (1, 0)]:
assert_eq(tensordot(a, b, axes=axes), np.tensordot(x, y, axes=axes))
assert_eq(tensordot(x, b, axes=axes), np.tensordot(x, y, axes=axes))
assert_eq(tensordot(a, y, axes=axes), np.tensordot(x, y, axes=axes))
assert same_keys(tensordot(a, b, axes=(1, 0)), tensordot(a, b, axes=(1, 0)))
assert not same_keys(tensordot(a, b, axes=0), tensordot(a, b, axes=1))
# assert (tensordot(a, a).chunks
# == tensordot(a, a, axes=((1, 0), (0, 1))).chunks)
# assert_eq(tensordot(a, a), np.tensordot(x, x))
def test_dot_method():
x = np.arange(400).reshape((20, 20))
a = from_array(x, chunks=(5, 5))
y = np.arange(200).reshape((20, 10))
b = from_array(y, chunks=(5, 5))
assert_eq(a.dot(b), x.dot(y))
def test_T():
x = np.arange(400).reshape((20, 20))
a = from_array(x, chunks=(5, 5))
assert_eq(x.T, a.T)
def test_norm():
a = np.arange(200, dtype='f8').reshape((20, 10))
b = from_array(a, chunks=(5, 5))
assert_eq(b.vnorm(), np.linalg.norm(a))
assert_eq(b.vnorm(ord=1), np.linalg.norm(a.flatten(), ord=1))
assert_eq(b.vnorm(ord=4, axis=0), np.linalg.norm(a, ord=4, axis=0))
assert b.vnorm(ord=4, axis=0, keepdims=True).ndim == b.ndim
split_every = {0: 3, 1: 3}
assert_eq(b.vnorm(ord=1, axis=0, split_every=split_every),
np.linalg.norm(a, ord=1, axis=0))
assert_eq(b.vnorm(ord=np.inf, axis=0, split_every=split_every),
np.linalg.norm(a, ord=np.inf, axis=0))
assert_eq(b.vnorm(ord=np.inf, split_every=split_every),
np.linalg.norm(a.flatten(), ord=np.inf))
def test_choose():
x = np.random.randint(10, size=(15, 16))
d = from_array(x, chunks=(4, 5))
assert_eq(choose(d > 5, [0, d]), np.choose(x > 5, [0, x]))
assert_eq(choose(d > 5, [-d, d]), np.choose(x > 5, [-x, x]))
def test_where():
x = np.random.randint(10, size=(15, 16))
d = from_array(x, chunks=(4, 5))
y = np.random.randint(10, size=15)
e = from_array(y, chunks=(4,))
assert_eq(where(d > 5, d, 0), np.where(x > 5, x, 0))
assert_eq(where(d > 5, d, -e[:, None]), np.where(x > 5, x, -y[:, None]))
def test_where_has_informative_error():
x = da.ones(5, chunks=3)
try:
result = da.where(x > 0)
except Exception as e:
assert 'dask' in str(e)
def test_coarsen():
x = np.random.randint(10, size=(24, 24))
d = from_array(x, chunks=(4, 8))
assert_eq(chunk.coarsen(np.sum, x, {0: 2, 1: 4}),
coarsen(np.sum, d, {0: 2, 1: 4}))
assert_eq(chunk.coarsen(np.sum, x, {0: 2, 1: 4}),
coarsen(da.sum, d, {0: 2, 1: 4}))
def test_coarsen_with_excess():
x = da.arange(10, chunks=5)
assert_eq(coarsen(np.min, x, {0: 3}, trim_excess=True),
np.array([0, 5]))
assert_eq(coarsen(np.sum, x, {0: 3}, trim_excess=True),
np.array([0+1+2, 5+6+7]))
def test_insert():
x = np.random.randint(10, size=(10, 10))
a = from_array(x, chunks=(5, 5))
y = np.random.randint(10, size=(5, 10))
b = from_array(y, chunks=(4, 4))
assert_eq(np.insert(x, 0, -1, axis=0), insert(a, 0, -1, axis=0))
assert_eq(np.insert(x, 3, -1, axis=-1), insert(a, 3, -1, axis=-1))
assert_eq(np.insert(x, 5, -1, axis=1), insert(a, 5, -1, axis=1))
assert_eq(np.insert(x, -1, -1, axis=-2), insert(a, -1, -1, axis=-2))
assert_eq(np.insert(x, [2, 3, 3], -1, axis=1),
insert(a, [2, 3, 3], -1, axis=1))
assert_eq(np.insert(x, [2, 3, 8, 8, -2, -2], -1, axis=0),
insert(a, [2, 3, 8, 8, -2, -2], -1, axis=0))
assert_eq(np.insert(x, slice(1, 4), -1, axis=1),
insert(a, slice(1, 4), -1, axis=1))
assert_eq(np.insert(x, [2] * 3 + [5] * 2, y, axis=0),
insert(a, [2] * 3 + [5] * 2, b, axis=0))
assert_eq(np.insert(x, 0, y[0], axis=1),
insert(a, 0, b[0], axis=1))
assert raises(NotImplementedError, lambda: insert(a, [4, 2], -1, axis=0))
assert raises(IndexError, lambda: insert(a, [3], -1, axis=2))
assert raises(IndexError, lambda: insert(a, [3], -1, axis=-3))
assert same_keys(insert(a, [2, 3, 8, 8, -2, -2], -1, axis=0),
insert(a, [2, 3, 8, 8, -2, -2], -1, axis=0))
def test_multi_insert():
z = np.random.randint(10, size=(1, 2))
c = from_array(z, chunks=(1, 2))
assert_eq(np.insert(np.insert(z, [0, 1], -1, axis=0), [1], -1, axis=1),
insert(insert(c, [0, 1], -1, axis=0), [1], -1, axis=1))
def test_broadcast_to():
x = np.random.randint(10, size=(5, 1, 6))
a = from_array(x, chunks=(3, 1, 3))
for shape in [(5, 4, 6), (2, 5, 1, 6), (3, 4, 5, 4, 6)]:
assert_eq(chunk.broadcast_to(x, shape),
broadcast_to(a, shape))
assert raises(ValueError, lambda: broadcast_to(a, (2, 1, 6)))
assert raises(ValueError, lambda: broadcast_to(a, (3,)))
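# ravel has a cheap path when chunking only splits the leading axis (one new
# task per block); other chunkings require additional intermediate tasks.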
def test_ravel():
x = np.random.randint(10, size=(4, 6))
# 2d
# these should use the shortcut
for chunks in [(4, 6), (2, 6)]:
a = from_array(x, chunks=chunks)
assert_eq(x.ravel(), a.ravel())
assert len(a.ravel().dask) == len(a.dask) + len(a.chunks[0])
# these cannot
for chunks in [(4, 2), (2, 2)]:
a = from_array(x, chunks=chunks)
assert_eq(x.ravel(), a.ravel())
assert len(a.ravel().dask) > len(a.dask) + len(a.chunks[0])
# 0d
assert_eq(x[0, 0].ravel(), a[0, 0].ravel())
# 1d
a_flat = a.ravel()
assert a_flat.ravel() is a_flat
# 3d
x = np.random.randint(10, size=(2, 3, 4))
for chunks in [2, 4, (2, 3, 2), (1, 3, 4)]:
a = from_array(x, chunks=chunks)
assert_eq(x.ravel(), a.ravel())
assert_eq(x.flatten(), a.flatten())
assert_eq(np.ravel(x), da.ravel(a))
def test_unravel():
x = np.random.randint(10, size=24)
# these should use the shortcut
for chunks, shape in [(24, (3, 8)),
(24, (12, 2)),
(6, (4, 6)),
(6, (4, 3, 2)),
(6, (4, 6, 1)),
(((6, 12, 6),), (4, 6))]:
a = from_array(x, chunks=chunks)
unraveled = unravel(a, shape)
assert_eq(x.reshape(*shape), unraveled)
assert len(unraveled.dask) == len(a.dask) + len(a.chunks[0])
# these cannot
for chunks, shape in [(6, (2, 12)),
(6, (1, 4, 6)),
(6, (2, 1, 12))]:
a = from_array(x, chunks=chunks)
unraveled = unravel(a, shape)
assert_eq(x.reshape(*shape), unraveled)
assert len(unraveled.dask) > len(a.dask) + len(a.chunks[0])
assert raises(AssertionError, lambda: unravel(unraveled, (3, 8)))
assert unravel(a, a.shape) is a
def test_reshape():
shapes = [(24,), (2, 12), (2, 3, 4)]
for original_shape in shapes:
for new_shape in shapes:
for chunks in [2, 4, 12]:
x = np.random.randint(10, size=original_shape)
a = from_array(x, chunks)
assert_eq(x.reshape(new_shape), a.reshape(new_shape))
assert raises(ValueError, lambda: reshape(a, (100,)))
assert_eq(x.reshape(*new_shape), a.reshape(*new_shape))
assert_eq(np.reshape(x, new_shape), reshape(a, new_shape))
# verify we can reshape a single chunk array without too many tasks
x = np.random.randint(10, size=(10, 20))
a = from_array(x, 20) # all one chunk
reshaped = a.reshape((20, 10))
assert_eq(x.reshape((20, 10)), reshaped)
assert len(reshaped.dask) == len(a.dask) + 2
def test_reshape_unknown_dimensions():
for original_shape in [(24,), (2, 12), (2, 3, 4)]:
for new_shape in [(-1,), (2, -1), (-1, 3, 4)]:
x = np.random.randint(10, size=original_shape)
a = from_array(x, 4)
assert_eq(x.reshape(new_shape), a.reshape(new_shape))
assert raises(ValueError, lambda: reshape(a, (-1, -1)))
def test_full():
d = da.full((3, 4), 2, chunks=((2, 1), (2, 2)))
assert d.chunks == ((2, 1), (2, 2))
assert_eq(d, np.full((3, 4), 2))
def test_map_blocks():
inc = lambda x: x + 1
x = np.arange(400).reshape((20, 20))
d = from_array(x, chunks=(7, 7))
e = d.map_blocks(inc, dtype=d.dtype)
assert d.chunks == e.chunks
assert_eq(e, x + 1)
e = d.map_blocks(inc, name='increment')
assert e.name == 'increment'
d = from_array(x, chunks=(10, 10))
e = d.map_blocks(lambda x: x[::2, ::2], chunks=(5, 5), dtype=d.dtype)
assert e.chunks == ((5, 5), (5, 5))
assert_eq(e, x[::2, ::2])
d = from_array(x, chunks=(8, 8))
e = d.map_blocks(lambda x: x[::2, ::2], chunks=((4, 4, 2), (4, 4, 2)),
dtype=d.dtype)
assert_eq(e, x[::2, ::2])
def test_map_blocks2():
x = np.arange(10, dtype='i8')
d = from_array(x, chunks=(2,))
def func(block, block_id=None):
return np.ones_like(block) * sum(block_id)
out = d.map_blocks(func, dtype='i8')
expected = np.array([0, 0, 1, 1, 2, 2, 3, 3, 4, 4], dtype='i8')
assert_eq(out, expected)
assert same_keys(d.map_blocks(func, dtype='i8'), out)
def test_map_blocks_with_constants():
d = da.arange(10, chunks=3)
e = d.map_blocks(add, 100, dtype=d.dtype)
assert_eq(e, np.arange(10) + 100)
assert_eq(da.map_blocks(sub, d, 10, dtype=d.dtype),
np.arange(10) - 10)
assert_eq(da.map_blocks(sub, 10, d, dtype=d.dtype),
10 - np.arange(10))
def test_map_blocks_with_kwargs():
d = da.arange(10, chunks=5)
assert_eq(d.map_blocks(np.max, axis=0, keepdims=True, dtype=d.dtype),
np.array([4, 9]))
def test_fromfunction():
def f(x, y):
return x + y
d = fromfunction(f, shape=(5, 5), chunks=(2, 2), dtype='f8')
assert_eq(d, np.fromfunction(f, shape=(5, 5)))
assert same_keys(d, fromfunction(f, shape=(5, 5), chunks=(2, 2), dtype='f8'))
def test_from_function_requires_block_args():
x = np.arange(10)
assert raises(Exception, lambda: from_array(x))
def test_repr():
d = da.ones((4, 4), chunks=(2, 2))
assert d.name[:5] in repr(d)
assert str(d.shape) in repr(d)
assert str(d._dtype) in repr(d)
d = da.ones((4000, 4), chunks=(4, 2))
assert len(str(d)) < 1000
def test_slicing_with_ellipsis():
x = np.arange(256).reshape((4, 4, 4, 4))
d = da.from_array(x, chunks=((2, 2, 2, 2)))
assert_eq(d[..., 1], x[..., 1])
assert_eq(d[0, ..., 1], x[0, ..., 1])
def test_slicing_with_ndarray():
x = np.arange(64).reshape((8, 8))
d = da.from_array(x, chunks=((4, 4)))
assert_eq(d[np.arange(8)], x)
assert_eq(d[np.ones(8, dtype=bool)], x)
def test_dtype():
d = da.ones((4, 4), chunks=(2, 2))
assert d.dtype == d.compute().dtype
assert (d * 1.0).dtype == (d + 1.0).compute().dtype
assert d.sum().dtype == d.sum().compute().dtype # no shape
def test_blockdims_from_blockshape():
assert blockdims_from_blockshape((10, 10), (4, 3)) == ((4, 4, 2), (3, 3, 3, 1))
assert raises(TypeError, lambda: blockdims_from_blockshape((10,), None))
assert blockdims_from_blockshape((1e2, 3), [1e1, 3]) == ((10,)*10, (3,))
assert blockdims_from_blockshape((np.int8(10),), (5,)) == ((5, 5),)
def test_coerce():
d = da.from_array(np.array([1]), chunks=(1,))
with dask.set_options(get=dask.get):
assert bool(d)
assert int(d)
assert float(d)
assert complex(d)
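# store() writes each block of one or more dask arrays into matching targets
# that support __setitem__, and validates that sources and targets line up.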
def test_store():
d = da.ones((4, 4), chunks=(2, 2))
a, b = d + 1, d + 2
at = np.empty(shape=(4, 4))
bt = np.empty(shape=(4, 4))
store([a, b], [at, bt])
assert (at == 2).all()
assert (bt == 3).all()
assert raises(ValueError, lambda: store([a], [at, bt]))
assert raises(ValueError, lambda: store(at, at))
assert raises(ValueError, lambda: store([at, bt], [at, bt]))
def test_store_compute_false():
d = da.ones((4, 4), chunks=(2, 2))
a, b = d + 1, d + 2
at = np.zeros(shape=(4, 4))
bt = np.zeros(shape=(4, 4))
v = store([a, b], [at, bt], compute=False)
assert (at == 0).all() and (bt == 0).all()
v.compute()
assert (at == 2).all() and (bt == 3).all()
class ThreadSafetyError(Exception):
pass
class NonthreadSafeStore(object):
def __init__(self):
self.in_use = False
def __setitem__(self, key, value):
if self.in_use:
raise ThreadSafetyError()
self.in_use = True
time.sleep(0.001)
self.in_use = False
class ThreadSafeStore(object):
def __init__(self):
self.concurrent_uses = 0
self.max_concurrent_uses = 0
def __setitem__(self, key, value):
self.concurrent_uses += 1
self.max_concurrent_uses = max(self.concurrent_uses, self.max_concurrent_uses)
time.sleep(0.01)
self.concurrent_uses -= 1
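# A lock passed to store() should be attached to every insert task; by default
# no thread safety is assumed, and lock=False disables locking so writes may
# run concurrently.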
def test_store_locks():
_Lock = type(Lock())
d = da.ones((10, 10), chunks=(2, 2))
a, b = d + 1, d + 2
at = np.zeros(shape=(10, 10))
bt = np.zeros(shape=(10, 10))
lock = Lock()
v = store([a, b], [at, bt], compute=False, lock=lock)
dsk = v.dask
locks = set(vv for v in dsk.values() for vv in v if isinstance(vv, _Lock))
assert locks == set([lock])
# Ensure same lock applies over multiple stores
at = NonthreadSafeStore()
v = store([a, b], [at, at], lock=lock,
get=dask.threaded.get, num_workers=10)
# Don't assume thread safety by default
at = NonthreadSafeStore()
store(a, at, get=dask.threaded.get, num_workers=10)
a.store(at, get=dask.threaded.get, num_workers=10)
# Ensure locks can be removed
at = ThreadSafeStore()
for i in range(10):
a.store(at, lock=False, get=dask.threaded.get, num_workers=10)
if at.max_concurrent_uses > 1:
break
if i == 9:
assert False
def test_to_hdf5():
h5py = pytest.importorskip('h5py')
x = da.ones((4, 4), chunks=(2, 2))
y = da.ones(4, chunks=2, dtype='i4')
with tmpfile('.hdf5') as fn:
x.to_hdf5(fn, '/x')
with h5py.File(fn) as f:
d = f['/x']
assert_eq(d[:], x)
assert d.chunks == (2, 2)
with tmpfile('.hdf5') as fn:
x.to_hdf5(fn, '/x', chunks=None)
with h5py.File(fn) as f:
d = f['/x']
assert_eq(d[:], x)
assert d.chunks is None
with tmpfile('.hdf5') as fn:
x.to_hdf5(fn, '/x', chunks=(1, 1))
with h5py.File(fn) as f:
d = f['/x']
assert_eq(d[:], x)
assert d.chunks == (1, 1)
with tmpfile('.hdf5') as fn:
da.to_hdf5(fn, {'/x': x, '/y': y})
with h5py.File(fn) as f:
assert_eq(f['/x'][:], x)
assert f['/x'].chunks == (2, 2)
assert_eq(f['/y'][:], y)
assert f['/y'].chunks == (2,)
def test_np_array_with_zero_dimensions():
d = da.ones((4, 4), chunks=(2, 2))
assert_eq(np.array(d.sum()), np.array(d.compute().sum()))
def test_unique():
x = np.array([1, 2, 4, 4, 5, 2])
d = da.from_array(x, chunks=(3,))
assert_eq(da.unique(d), np.unique(x))
def test_dtype_complex():
x = np.arange(24).reshape((4, 6)).astype('f4')
y = np.arange(24).reshape((4, 6)).astype('i8')
z = np.arange(24).reshape((4, 6)).astype('i2')
a = da.from_array(x, chunks=(2, 3))
b = da.from_array(y, chunks=(2, 3))
c = da.from_array(z, chunks=(2, 3))
def assert_eq(a, b):
assert (isinstance(a, np.dtype) and
isinstance(b, np.dtype) and
str(a) == str(b))
assert_eq(a._dtype, x.dtype)
assert_eq(b._dtype, y.dtype)
assert_eq((a + 1)._dtype, (x + 1).dtype)
assert_eq((a + b)._dtype, (x + y).dtype)
assert_eq(a.T._dtype, x.T.dtype)
assert_eq(a[:3]._dtype, x[:3].dtype)
assert_eq((a.dot(b.T))._dtype, (x.dot(y.T)).dtype)
assert_eq(stack([a, b])._dtype, np.vstack([x, y]).dtype)
assert_eq(concatenate([a, b])._dtype, np.concatenate([x, y]).dtype)
assert_eq(b.std()._dtype, y.std().dtype)
assert_eq(c.sum()._dtype, z.sum().dtype)
assert_eq(a.min()._dtype, a.min().dtype)
assert_eq(b.std()._dtype, b.std().dtype)
assert_eq(a.argmin(axis=0)._dtype, a.argmin(axis=0).dtype)
assert_eq(da.sin(c)._dtype, np.sin(z).dtype)
assert_eq(da.exp(b)._dtype, np.exp(y).dtype)
assert_eq(da.floor(a)._dtype, np.floor(x).dtype)
assert_eq(da.isnan(b)._dtype, np.isnan(y).dtype)
with ignoring(ImportError):
assert da.isnull(b)._dtype == 'bool'
assert da.notnull(b)._dtype == 'bool'
x = np.array([('a', 1)], dtype=[('text', 'S1'), ('numbers', 'i4')])
d = da.from_array(x, chunks=(1,))
assert_eq(d['text']._dtype, x['text'].dtype)
assert_eq(d[['numbers', 'text']]._dtype, x[['numbers', 'text']].dtype)
def test_astype():
x = np.ones(5, dtype='f4')
d = da.from_array(x, chunks=(2,))
assert d.astype('i8')._dtype == 'i8'
assert_eq(d.astype('i8'), x.astype('i8'))
assert same_keys(d.astype('i8'), d.astype('i8'))
assert d.astype(d.dtype) is d
def test_arithmetic():
x = np.arange(5).astype('f4') + 2
y = np.arange(5).astype('i8') + 2
z = np.arange(5).astype('i4') + 2
a = da.from_array(x, chunks=(2,))
b = da.from_array(y, chunks=(2,))
c = da.from_array(z, chunks=(2,))
assert_eq(a + b, x + y)
assert_eq(a * b, x * y)
assert_eq(a - b, x - y)
assert_eq(a / b, x / y)
assert_eq(b & b, y & y)
assert_eq(b | b, y | y)
assert_eq(b ^ b, y ^ y)
assert_eq(a // b, x // y)
assert_eq(a ** b, x ** y)
assert_eq(a % b, x % y)
assert_eq(a > b, x > y)
assert_eq(a < b, x < y)
assert_eq(a >= b, x >= y)
assert_eq(a <= b, x <= y)
assert_eq(a == b, x == y)
assert_eq(a != b, x != y)
assert_eq(a + 2, x + 2)
assert_eq(a * 2, x * 2)
assert_eq(a - 2, x - 2)
assert_eq(a / 2, x / 2)
assert_eq(b & True, y & True)
assert_eq(b | True, y | True)
assert_eq(b ^ True, y ^ True)
assert_eq(a // 2, x // 2)
assert_eq(a ** 2, x ** 2)
assert_eq(a % 2, x % 2)
assert_eq(a > 2, x > 2)
assert_eq(a < 2, x < 2)
assert_eq(a >= 2, x >= 2)
assert_eq(a <= 2, x <= 2)
assert_eq(a == 2, x == 2)
assert_eq(a != 2, x != 2)
assert_eq(2 + b, 2 + y)
assert_eq(2 * b, 2 * y)
assert_eq(2 - b, 2 - y)
assert_eq(2 / b, 2 / y)
assert_eq(True & b, True & y)
assert_eq(True | b, True | y)
assert_eq(True ^ b, True ^ y)
assert_eq(2 // b, 2 // y)
assert_eq(2 ** b, 2 ** y)
assert_eq(2 % b, 2 % y)
assert_eq(2 > b, 2 > y)
assert_eq(2 < b, 2 < y)
assert_eq(2 >= b, 2 >= y)
assert_eq(2 <= b, 2 <= y)
assert_eq(2 == b, 2 == y)
assert_eq(2 != b, 2 != y)
assert_eq(-a, -x)
assert_eq(abs(a), abs(x))
assert_eq(~(a == b), ~(x == y))
assert_eq(~(a == b), ~(x == y))
assert_eq(da.logaddexp(a, b), np.logaddexp(x, y))
assert_eq(da.logaddexp2(a, b), np.logaddexp2(x, y))
assert_eq(da.exp(b), np.exp(y))
assert_eq(da.log(a), np.log(x))
assert_eq(da.log10(a), np.log10(x))
assert_eq(da.log1p(a), np.log1p(x))
assert_eq(da.expm1(b), np.expm1(y))
assert_eq(da.sqrt(a), np.sqrt(x))
assert_eq(da.square(a), np.square(x))
assert_eq(da.sin(a), np.sin(x))
assert_eq(da.cos(b), np.cos(y))
assert_eq(da.tan(a), np.tan(x))
assert_eq(da.arcsin(b/10), np.arcsin(y/10))
assert_eq(da.arccos(b/10), np.arccos(y/10))
assert_eq(da.arctan(b/10), np.arctan(y/10))
assert_eq(da.arctan2(b*10, a), np.arctan2(y*10, x))
assert_eq(da.hypot(b, a), np.hypot(y, x))
assert_eq(da.sinh(a), np.sinh(x))
assert_eq(da.cosh(b), np.cosh(y))
assert_eq(da.tanh(a), np.tanh(x))
assert_eq(da.arcsinh(b*10), np.arcsinh(y*10))
assert_eq(da.arccosh(b*10), np.arccosh(y*10))
assert_eq(da.arctanh(b/10), np.arctanh(y/10))
assert_eq(da.deg2rad(a), np.deg2rad(x))
assert_eq(da.rad2deg(a), np.rad2deg(x))
assert_eq(da.logical_and(a < 1, b < 4), np.logical_and(x < 1, y < 4))
assert_eq(da.logical_or(a < 1, b < 4), np.logical_or(x < 1, y < 4))
assert_eq(da.logical_xor(a < 1, b < 4), np.logical_xor(x < 1, y < 4))
assert_eq(da.logical_not(a < 1), np.logical_not(x < 1))
assert_eq(da.maximum(a, 5 - a), np.maximum(x, 5 - x))
assert_eq(da.minimum(a, 5 - a), np.minimum(x, 5 - x))
assert_eq(da.fmax(a, 5 - a), np.fmax(x, 5 - x))
assert_eq(da.fmin(a, 5 - a), np.fmin(x, 5 - x))
assert_eq(da.isreal(a + 1j * b), np.isreal(x + 1j * y))
assert_eq(da.iscomplex(a + 1j * b), np.iscomplex(x + 1j * y))
assert_eq(da.isfinite(a), np.isfinite(x))
assert_eq(da.isinf(a), np.isinf(x))
assert_eq(da.isnan(a), np.isnan(x))
assert_eq(da.signbit(a - 3), np.signbit(x - 3))
assert_eq(da.copysign(a - 3, b), np.copysign(x - 3, y))
assert_eq(da.nextafter(a - 3, b), np.nextafter(x - 3, y))
assert_eq(da.ldexp(c, c), np.ldexp(z, z))
assert_eq(da.fmod(a * 12, b), np.fmod(x * 12, y))
assert_eq(da.floor(a * 0.5), np.floor(x * 0.5))
assert_eq(da.ceil(a), np.ceil(x))
assert_eq(da.trunc(a / 2), np.trunc(x / 2))
assert_eq(da.degrees(b), np.degrees(y))
assert_eq(da.radians(a), np.radians(x))
assert_eq(da.rint(a + 0.3), np.rint(x + 0.3))
assert_eq(da.fix(a - 2.5), np.fix(x - 2.5))
assert_eq(da.angle(a + 1j), np.angle(x + 1j))
assert_eq(da.real(a + 1j), np.real(x + 1j))
assert_eq((a + 1j).real, np.real(x + 1j))
assert_eq(da.imag(a + 1j), np.imag(x + 1j))
assert_eq((a + 1j).imag, np.imag(x + 1j))
assert_eq(da.conj(a + 1j * b), np.conj(x + 1j * y))
assert_eq((a + 1j * b).conj(), (x + 1j * y).conj())
assert_eq(da.clip(b, 1, 4), np.clip(y, 1, 4))
assert_eq(da.fabs(b), np.fabs(y))
assert_eq(da.sign(b - 2), np.sign(y - 2))
l1, l2 = da.frexp(a)
r1, r2 = np.frexp(x)
assert_eq(l1, r1)
assert_eq(l2, r2)
l1, l2 = da.modf(a)
r1, r2 = np.modf(x)
assert_eq(l1, r1)
assert_eq(l2, r2)
assert_eq(da.around(a, -1), np.around(x, -1))
def test_elemwise_consistent_names():
a = da.from_array(np.arange(5, dtype='f4'), chunks=(2,))
b = da.from_array(np.arange(5, dtype='f4'), chunks=(2,))
assert same_keys(a + b, a + b)
assert same_keys(a + 2, a + 2)
assert same_keys(da.exp(a), da.exp(a))
assert same_keys(da.exp(a, dtype='f8'), da.exp(a, dtype='f8'))
assert same_keys(da.maximum(a, b), da.maximum(a, b))
def test_optimize():
x = np.arange(5).astype('f4')
a = da.from_array(x, chunks=(2,))
expr = a[1:4] + 1
result = optimize(expr.dask, expr._keys())
assert isinstance(result, dict)
assert all(key in result for key in expr._keys())
def test_slicing_with_non_ndarrays():
class ARangeSlice(object):
def __init__(self, start, stop):
self.start = start
self.stop = stop
def __array__(self):
return np.arange(self.start, self.stop)
class ARangeSlicable(object):
dtype = 'i8'
def __init__(self, n):
self.n = n
@property
def shape(self):
return (self.n,)
def __getitem__(self, key):
return ARangeSlice(key[0].start, key[0].stop)
x = da.from_array(ARangeSlicable(10), chunks=(4,))
assert_eq((x + 1).sum(), (np.arange(10, dtype=x.dtype) + 1).sum())
def test_getarray():
assert type(getarray(np.matrix([[1]]), 0)) == np.ndarray
assert_eq(getarray([1, 2, 3, 4, 5], slice(1, 4)), np.array([2, 3, 4]))
assert_eq(getarray(np.arange(5), (None, slice(None, None))),
np.arange(5)[None, :])
def test_squeeze():
x = da.ones((10, 1), chunks=(3, 1))
assert_eq(x.squeeze(), x.compute().squeeze())
assert x.squeeze().chunks == ((3, 3, 3, 1),)
assert same_keys(x.squeeze(), x.squeeze())
def test_size():
x = da.ones((10, 2), chunks=(3, 1))
assert x.size == np.array(x).size
def test_nbytes():
x = da.ones((10, 2), chunks=(3, 1))
assert x.nbytes == np.array(x).nbytes
def test_Array_normalizes_dtype():
x = da.ones((3,), chunks=(1,), dtype=int)
assert isinstance(x.dtype, np.dtype)
def test_args():
x = da.ones((10, 2), chunks=(3, 1), dtype='i4') + 1
y = Array(*x._args)
assert_eq(x, y)
def test_from_array_with_lock():
x = np.arange(10)
d = da.from_array(x, chunks=5, lock=True)
tasks = [v for k, v in d.dask.items() if k[0] == d.name]
assert isinstance(tasks[0][3], type(Lock()))
assert len(set(task[3] for task in tasks)) == 1
assert_eq(d, x)
lock = Lock()
e = da.from_array(x, chunks=5, lock=lock)
f = da.from_array(x, chunks=5, lock=lock)
assert_eq(e + f, x + x)
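# Slicing an array backed by np.matrix should still yield plain ndarray
# blocks (getarray coerces matrix subclasses back to ndarray).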
def test_from_array_slicing_results_in_ndarray():
x = np.matrix(np.arange(100).reshape((10, 10)))
dx = da.from_array(x, chunks=(5, 5))
s1 = dx[0:5]
assert type(dx[0:5].compute()) == np.ndarray
s2 = s1[0:3]
assert type(s2.compute()) == np.ndarray
s3 = s2[:, 0]
assert type(s3.compute()) == np.ndarray
def test_from_func():
x = np.arange(10)
f = lambda n: n * x
d = from_func(f, (10,), x.dtype, kwargs={'n': 2})
assert d.shape == x.shape
assert d.dtype == x.dtype
assert_eq(d.compute(), 2 * x)
assert same_keys(d, from_func(f, (10,), x.dtype, kwargs={'n': 2}))
def test_topk():
x = np.array([5, 2, 1, 6])
d = da.from_array(x, chunks=2)
e = da.topk(2, d)
assert e.chunks == ((2,),)
assert_eq(e, np.sort(x)[-1:-3:-1])
assert same_keys(da.topk(2, d), e)
def test_topk_k_bigger_than_chunk():
x = np.array([5, 2, 1, 6])
d = da.from_array(x, chunks=2)
e = da.topk(3, d)
assert e.chunks == ((3,),)
assert_eq(e, np.array([6, 5, 2]))
def test_bincount():
x = np.array([2, 1, 5, 2, 1])
d = da.from_array(x, chunks=2)
e = da.bincount(d, minlength=6)
assert_eq(e, np.bincount(x, minlength=6))
assert same_keys(da.bincount(d, minlength=6), e)
def test_bincount_with_weights():
x = np.array([2, 1, 5, 2, 1])
d = da.from_array(x, chunks=2)
weights = np.array([1, 2, 1, 0.5, 1])
dweights = da.from_array(weights, chunks=2)
e = da.bincount(d, weights=dweights, minlength=6)
assert_eq(e, np.bincount(x, weights=dweights, minlength=6))
assert same_keys(da.bincount(d, weights=dweights, minlength=6), e)
def test_bincount_raises_informative_error_on_missing_minlength_kwarg():
x = np.array([2, 1, 5, 2, 1])
d = da.from_array(x, chunks=2)
try:
da.bincount(d)
except Exception as e:
assert 'minlength' in str(e)
else:
assert False
def test_histogram():
# Test for normal, flattened input
n = 100
v = da.random.random(n, chunks=10)
bins = np.arange(0, 1.01, 0.01)
(a1, b1) = da.histogram(v, bins=bins)
(a2, b2) = np.histogram(v, bins=bins)
# Check if the sum of the bins equals the number of samples
assert a2.sum(axis=0) == n
assert a1.sum(axis=0) == n
assert_eq(a1, a2)
assert same_keys(da.histogram(v, bins=bins)[0], a1)
def test_histogram_alternative_bins_range():
v = da.random.random(100, chunks=10)
bins = np.arange(0, 1.01, 0.01)
# Other input
(a1, b1) = da.histogram(v, bins=10, range=(0, 1))
(a2, b2) = np.histogram(v, bins=10, range=(0, 1))
assert_eq(a1, a2)
assert_eq(b1, b2)
def test_histogram_return_type():
v = da.random.random(100, chunks=10)
bins = np.arange(0, 1.01, 0.01)
# Check if return type is same as hist
bins = np.arange(0, 11, 1, dtype='i4')
assert_eq(da.histogram(v * 10, bins=bins)[0],
np.histogram(v * 10, bins=bins)[0])
def test_histogram_extra_args_and_shapes():
# Check for extra args and shapes
bins = np.arange(0, 1.01, 0.01)
v = da.random.random(100, chunks=10)
data = [(v, bins, da.ones(100, chunks=v.chunks) * 5),
(da.random.random((50, 50), chunks=10), bins, da.ones((50, 50), chunks=10) * 5)]
for v, bins, w in data:
# normed (deprecated alias of density)
assert_eq(da.histogram(v, bins=bins, normed=True)[0],
np.histogram(v, bins=bins, normed=True)[0])
# density
assert_eq(da.histogram(v, bins=bins, density=True)[0],
np.histogram(v, bins=bins, density=True)[0])
# weights
assert_eq(da.histogram(v, bins=bins, weights=w)[0],
np.histogram(v, bins=bins, weights=w)[0])
assert_eq(da.histogram(v, bins=bins, weights=w, density=True)[0],
np.histogram(v, bins=bins, weights=w, density=True)[0])
def test_concatenate3_2():
x = np.array([1, 2])
assert_eq(concatenate3([x, x, x]),
np.array([1, 2, 1, 2, 1, 2]))
x = np.array([[1, 2]])
assert (concatenate3([[x, x, x], [x, x, x]]) ==
np.array([[1, 2, 1, 2, 1, 2],
[1, 2, 1, 2, 1, 2]])).all()
assert (concatenate3([[x, x], [x, x], [x, x]]) ==
np.array([[1, 2, 1, 2],
[1, 2, 1, 2],
[1, 2, 1, 2]])).all()
x = np.arange(12).reshape((2, 2, 3))
assert_eq(concatenate3([[[x, x, x],
[x, x, x]],
[[x, x, x],
[x, x, x]]]),
np.array([[[ 0, 1, 2, 0, 1, 2, 0, 1, 2],
[ 3, 4, 5, 3, 4, 5, 3, 4, 5],
[ 0, 1, 2, 0, 1, 2, 0, 1, 2],
[ 3, 4, 5, 3, 4, 5, 3, 4, 5]],
[[ 6, 7, 8, 6, 7, 8, 6, 7, 8],
[ 9, 10, 11, 9, 10, 11, 9, 10, 11],
[ 6, 7, 8, 6, 7, 8, 6, 7, 8],
[ 9, 10, 11, 9, 10, 11, 9, 10, 11]],
[[ 0, 1, 2, 0, 1, 2, 0, 1, 2],
[ 3, 4, 5, 3, 4, 5, 3, 4, 5],
[ 0, 1, 2, 0, 1, 2, 0, 1, 2],
[ 3, 4, 5, 3, 4, 5, 3, 4, 5]],
[[ 6, 7, 8, 6, 7, 8, 6, 7, 8],
[ 9, 10, 11, 9, 10, 11, 9, 10, 11],
[ 6, 7, 8, 6, 7, 8, 6, 7, 8],
[ 9, 10, 11, 9, 10, 11, 9, 10, 11]]]))
def test_map_blocks3():
x = np.arange(10)
y = np.arange(10) * 2
d = da.from_array(x, chunks=5)
e = da.from_array(y, chunks=5)
assert_eq(da.core.map_blocks(lambda a, b: a+2*b, d, e, dtype=d.dtype),
x + 2*y)
z = np.arange(100).reshape((10, 10))
f = da.from_array(z, chunks=5)
func = lambda a, b: a + 2*b
res = da.core.map_blocks(func, d, f, dtype=d.dtype)
assert_eq(res, x + 2*z)
assert same_keys(da.core.map_blocks(func, d, f, dtype=d.dtype), res)
assert_eq(da.map_blocks(func, f, d, dtype=d.dtype),
z + 2*x)
def test_from_array_with_missing_chunks():
x = np.random.randn(2, 4, 3)
d = da.from_array(x, chunks=(None, 2, None))
assert d.chunks == da.from_array(x, chunks=(2, 2, 3)).chunks
def test_cache():
x = da.arange(15, chunks=5)
y = 2 * x + 1
z = y.cache()
assert len(z.dask) == 3 # very short graph
assert_eq(y, z)
cache = np.empty(15, dtype=y.dtype)
z = y.cache(store=cache)
assert len(z.dask) < 6 # very short graph
assert z.chunks == y.chunks
assert_eq(y, z)
def test_take_dask_from_numpy():
x = np.arange(5).astype('f8')
y = da.from_array(np.array([1, 2, 3, 3, 2, 1]), chunks=3)
z = da.take(x * 2, y)
assert z.chunks == y.chunks
assert_eq(z, np.array([2., 4., 6., 6., 4., 2.]))
def test_normalize_chunks():
assert normalize_chunks(3, (4, 6)) == ((3, 1), (3, 3))
def test_raise_on_no_chunks():
x = da.ones(6, chunks=3)
try:
Array(x.dask, x.name, chunks=None, dtype=x.dtype, shape=None)
assert False
except ValueError as e:
assert "dask.pydata.org" in str(e)
assert raises(ValueError, lambda: da.ones(6))
def test_chunks_is_immutable():
x = da.ones(6, chunks=3)
try:
x.chunks = 2
assert False
except TypeError as e:
assert 'rechunk(2)' in str(e)
def test_raise_on_bad_kwargs():
x = da.ones(5, chunks=3)
try:
da.minimum(x, out=None)
except TypeError as e:
assert 'minimum' in str(e)
assert 'out' in str(e)
def test_long_slice():
x = np.arange(10000)
d = da.from_array(x, chunks=1)
assert_eq(d[8000:8200], x[8000:8200])
def test_h5py_newaxis():
h5py = pytest.importorskip('h5py')
with tmpfile('h5') as fn:
with h5py.File(fn) as f:
x = f.create_dataset('/x', shape=(10, 10), dtype='f8')
d = da.from_array(x, chunks=(5, 5))
assert d[None, :, :].compute(get=get_sync).shape == (1, 10, 10)
assert d[:, None, :].compute(get=get_sync).shape == (10, 1, 10)
assert d[:, :, None].compute(get=get_sync).shape == (10, 10, 1)
assert same_keys(d[:, :, None], d[:, :, None])
def test_ellipsis_slicing():
assert_eq(da.ones(4, chunks=2)[...], np.ones(4))
def test_point_slicing():
x = np.arange(56).reshape((7, 8))
d = da.from_array(x, chunks=(3, 4))
result = d.vindex[[1, 2, 5, 5], [3, 1, 6, 1]]
assert_eq(result, x[[1, 2, 5, 5], [3, 1, 6, 1]])
result = d.vindex[[0, 1, 6, 0], [0, 1, 0, 7]]
assert_eq(result, x[[0, 1, 6, 0], [0, 1, 0, 7]])
assert same_keys(result, d.vindex[[0, 1, 6, 0], [0, 1, 0, 7]])
def test_point_slicing_with_full_slice():
from dask.array.core import _vindex_transpose, _get_axis
x = np.arange(4*5*6*7).reshape((4, 5, 6, 7))
d = da.from_array(x, chunks=(2, 3, 3, 4))
inds = [
[[1, 2, 3], None, [3, 2, 1], [5, 3, 4]],
[[1, 2, 3], None, [4, 3, 2], None],
[[1, 2, 3], [3, 2, 1]],
[[1, 2, 3], [3, 2, 1], [3, 2, 1], [5, 3, 4]],
[[], [], [], None],
[np.array([1, 2, 3]), None, np.array([4, 3, 2]), None],
[None, None, [1, 2, 3], [4, 3, 2]],
[None, [0, 2, 3], None, [0, 3, 2]],
]
for ind in inds:
slc = [i if isinstance(i, (np.ndarray, list)) else slice(None, None)
for i in ind]
result = d.vindex[tuple(slc)]
# Rotate the expected result accordingly
axis = _get_axis(ind)
expected = _vindex_transpose(x[tuple(slc)], axis)
assert_eq(result, expected)
# Always have the first axis be the length of the points
k = len(next(i for i in ind if isinstance(i, (np.ndarray, list))))
assert result.shape[0] == k
def test_slice_with_floats():
d = da.ones((5,), chunks=(3,))
with pytest.raises(IndexError):
d[1.5]
with pytest.raises(IndexError):
d[0:1.5]
with pytest.raises(IndexError):
d[[1, 1.5]]
def test_vindex_errors():
d = da.ones((5, 5, 5), chunks=(3, 3, 3))
assert raises(IndexError, lambda: d.vindex[0])
assert raises(IndexError, lambda: d.vindex[[1, 2, 3]])
assert raises(IndexError, lambda: d.vindex[[1, 2, 3], [1, 2, 3], 0])
assert raises(IndexError, lambda: d.vindex[[1], [1, 2, 3]])
assert raises(IndexError, lambda: d.vindex[[1, 2, 3], [[1], [2], [3]]])
def test_vindex_merge():
from dask.array.core import _vindex_merge
locations = [1], [2, 0]
values = [np.array([[1, 2, 3]]),
np.array([[10, 20, 30], [40, 50, 60]])]
assert (_vindex_merge(locations, values) == np.array([[40, 50, 60],
[1, 2, 3],
[10, 20, 30]])).all()
def test_empty_array():
assert_eq(np.arange(0), da.arange(0, chunks=5))
def test_array():
x = np.ones(5, dtype='i4')
d = da.ones(5, chunks=3, dtype='i4')
assert_eq(da.array(d, ndmin=3, dtype='i8'),
np.array(x, ndmin=3, dtype='i8'))
def test_cov():
x = np.arange(56).reshape((7, 8))
d = da.from_array(x, chunks=(4, 4))
assert_eq(da.cov(d), np.cov(x))
assert_eq(da.cov(d, rowvar=0), np.cov(x, rowvar=0))
assert_eq(da.cov(d, ddof=10), np.cov(x, ddof=10))
assert_eq(da.cov(d, bias=1), np.cov(x, bias=1))
assert_eq(da.cov(d, d), np.cov(x, x))
y = np.arange(8)
e = da.from_array(y, chunks=(4,))
assert_eq(da.cov(d, e), np.cov(x, y))
assert_eq(da.cov(e, d), np.cov(y, x))
assert raises(ValueError, lambda: da.cov(d, ddof=1.5))
def test_corrcoef():
x = np.arange(56).reshape((7, 8))
d = da.from_array(x, chunks=(4, 4))
assert_eq(da.corrcoef(d), np.corrcoef(x))
assert_eq(da.corrcoef(d, rowvar=0), np.corrcoef(x, rowvar=0))
assert_eq(da.corrcoef(d, d), np.corrcoef(x, x))
y = np.arange(8)
e = da.from_array(y, chunks=(4,))
assert_eq(da.corrcoef(d, e), np.corrcoef(x, y))
assert_eq(da.corrcoef(e, d), np.corrcoef(y, x))
def test_memmap():
with tmpfile('npy') as fn_1:
with tmpfile('npy') as fn_2:
try:
x = da.arange(100, chunks=15)
target = np.memmap(fn_1, shape=x.shape, mode='w+', dtype=x.dtype)
x.store(target)
assert_eq(target, x)
np.save(fn_2, target)
assert_eq(np.load(fn_2, mmap_mode='r'), x)
finally:
target._mmap.close()
def test_to_npy_stack():
x = np.arange(5*10*10).reshape((5, 10, 10))
d = da.from_array(x, chunks=(2, 4, 4))
with tmpdir() as dirname:
da.to_npy_stack(dirname, d, axis=0)
assert os.path.exists(os.path.join(dirname, '0.npy'))
assert (np.load(os.path.join(dirname, '1.npy')) == x[2:4]).all()
e = da.from_npy_stack(dirname)
assert_eq(d, e)
def test_view():
x = np.arange(56).reshape((7, 8))
d = da.from_array(x, chunks=(2, 3))
assert_eq(x.view('i4'), d.view('i4'))
assert_eq(x.view('i2'), d.view('i2'))
assert all(isinstance(s, int) for s in d.shape)
x = np.arange(8, dtype='i1')
d = da.from_array(x, chunks=(4,))
assert_eq(x.view('i4'), d.view('i4'))
with pytest.raises(ValueError):
x = np.arange(8, dtype='i1')
d = da.from_array(x, chunks=(3,))
d.view('i4')
with pytest.raises(ValueError):
d.view('i4', order='asdf')
def test_view_fortran():
x = np.asfortranarray(np.arange(64).reshape((8, 8)))
d = da.from_array(x, chunks=(2, 3))
assert_eq(x.view('i4'), d.view('i4', order='F'))
assert_eq(x.view('i2'), d.view('i2', order='F'))
def test_h5py_tokenize():
h5py = pytest.importorskip('h5py')
with tmpfile('hdf5') as fn1:
with tmpfile('hdf5') as fn2:
f = h5py.File(fn1)
g = h5py.File(fn2)
f['x'] = np.arange(10).astype(float)
g['x'] = np.ones(10).astype(float)
x1 = f['x']
x2 = g['x']
assert tokenize(x1) != tokenize(x2)
def test_map_blocks_with_changed_dimension():
x = np.arange(56).reshape((7, 8))
d = da.from_array(x, chunks=(7, 4))
e = d.map_blocks(lambda b: b.sum(axis=0), chunks=(4,), drop_axis=0,
dtype=d.dtype)
assert e.ndim == 1
assert e.chunks == ((4, 4),)
assert_eq(e, x.sum(axis=0))
x = np.arange(64).reshape((8, 8))
d = da.from_array(x, chunks=(4, 4))
e = d.map_blocks(lambda b: b[None, :, :, None],
chunks=(1, 4, 4, 1), new_axis=[0, 3], dtype=d.dtype)
assert e.ndim == 4
assert e.chunks == ((1,), (4, 4), (4, 4), (1,))
assert_eq(e, x[None, :, :, None])
def test_broadcast_chunks():
assert broadcast_chunks(((5, 5),), ((5, 5),)) == ((5, 5),)
a = ((10, 10, 10), (5, 5),)
b = ((5, 5),)
assert broadcast_chunks(a, b) == ((10, 10, 10), (5, 5),)
assert broadcast_chunks(b, a) == ((10, 10, 10), (5, 5),)
a = ((10, 10, 10), (5, 5),)
b = ((1,), (5, 5),)
assert broadcast_chunks(a, b) == ((10, 10, 10), (5, 5),)
a = ((10, 10, 10), (5, 5),)
b = ((3, 3,), (5, 5),)
with pytest.raises(ValueError):
broadcast_chunks(a, b)
a = ((1,), (5, 5),)
b = ((1,), (5, 5),)
assert broadcast_chunks(a, b) == a
def test_chunks_error():
x = np.ones((10, 10))
with pytest.raises(ValueError):
da.from_array(x, chunks=(5,))
def test_array_compute_forward_kwargs():
x = da.arange(10, chunks=2).sum()
x.compute(bogus_keyword=10)
def test_dont_fuse_outputs():
dsk = {('x', 0): np.array([1, 2]),
('x', 1): (inc, ('x', 0))}
a = da.Array(dsk, 'x', chunks=(2,), shape=(4,), dtype=np.array([1]).dtype)
assert_eq(a, np.array([1, 2, 2, 3], dtype=a.dtype))
def test_dont_dealias_outputs():
dsk = {('x', 0, 0): np.ones((2, 2)),
('x', 0, 1): np.ones((2, 2)),
('x', 1, 0): np.ones((2, 2)),
('x', 1, 1): ('x', 0, 0)}
a = da.Array(dsk, 'x', chunks=(2, 2), shape=(4, 4), dtype=np.ones(1).dtype)
assert_eq(a, np.ones((4, 4)))
def test_timedelta_op():
x = np.array([np.timedelta64(10, 'h')])
y = np.timedelta64(1, 'h')
a = da.from_array(x, chunks=(1,)) / y
assert a.compute() == x / y
def test_to_delayed():
x = da.random.random((4, 4), chunks=(2, 2))
y = x + 10
[[a, b], [c, d]] = y.to_delayed()
assert_eq(a.compute(), y[:2, :2])
def test_cumulative():
x = da.arange(20, chunks=5)
assert_eq(x.cumsum(axis=0), np.arange(20).cumsum())
assert_eq(x.cumprod(axis=0), np.arange(20).cumprod())
assert_eq(da.nancumsum(x, axis=0), nancumsum(np.arange(20)))
assert_eq(da.nancumprod(x, axis=0), nancumprod(np.arange(20)))
a = np.random.random((20))
rs = np.random.RandomState(0)
a[rs.rand(*a.shape) < 0.5] = np.nan
x = da.from_array(a, chunks=5)
assert_eq(da.nancumsum(x, axis=0), nancumsum(a))
assert_eq(da.nancumprod(x, axis=0), nancumprod(a))
a = np.random.random((20, 24))
x = da.from_array(a, chunks=(6, 5))
assert_eq(x.cumsum(axis=0), a.cumsum(axis=0))
assert_eq(x.cumsum(axis=1), a.cumsum(axis=1))
assert_eq(x.cumprod(axis=0), a.cumprod(axis=0))
assert_eq(x.cumprod(axis=1), a.cumprod(axis=1))
assert_eq(da.nancumsum(x, axis=0), nancumsum(a, axis=0))
assert_eq(da.nancumsum(x, axis=1), nancumsum(a, axis=1))
assert_eq(da.nancumprod(x, axis=0), nancumprod(a, axis=0))
assert_eq(da.nancumprod(x, axis=1), nancumprod(a, axis=1))
a = np.random.random((20, 24))
rs = np.random.RandomState(0)
a[rs.rand(*a.shape) < 0.5] = np.nan
x = da.from_array(a, chunks=(6, 5))
assert_eq(da.nancumsum(x, axis=0), nancumsum(a, axis=0))
assert_eq(da.nancumsum(x, axis=1), nancumsum(a, axis=1))
assert_eq(da.nancumprod(x, axis=0), nancumprod(a, axis=0))
assert_eq(da.nancumprod(x, axis=1), nancumprod(a, axis=1))
a = np.random.random((20, 24, 13))
x = da.from_array(a, chunks=(6, 5, 4))
for axis in [0, 1, 2]:
assert_eq(x.cumsum(axis=axis), a.cumsum(axis=axis))
assert_eq(x.cumprod(axis=axis), a.cumprod(axis=axis))
assert_eq(da.nancumsum(x, axis=axis), nancumsum(a, axis=axis))
assert_eq(da.nancumprod(x, axis=axis), nancumprod(a, axis=axis))
a = np.random.random((20, 24, 13))
rs = np.random.RandomState(0)
a[rs.rand(*a.shape) < 0.5] = np.nan
x = da.from_array(a, chunks=(6, 5, 4))
for axis in [0, 1, 2]:
assert_eq(da.nancumsum(x, axis=axis), nancumsum(a, axis=axis))
assert_eq(da.nancumprod(x, axis=axis), nancumprod(a, axis=axis))
def test_eye():
assert_eq(da.eye(9, chunks=3), np.eye(9))
assert_eq(da.eye(10, chunks=3), np.eye(10))
assert_eq(da.eye(9, chunks=3, M=11), np.eye(9, M=11))
assert_eq(da.eye(11, chunks=3, M=9), np.eye(11, M=9))
assert_eq(da.eye(7, chunks=3, M=11), np.eye(7, M=11))
assert_eq(da.eye(11, chunks=3, M=7), np.eye(11, M=7))
assert_eq(da.eye(9, chunks=3, k=2), np.eye(9, k=2))
assert_eq(da.eye(9, chunks=3, k=-2), np.eye(9, k=-2))
assert_eq(da.eye(7, chunks=3, M=11, k=5), np.eye(7, M=11, k=5))
assert_eq(da.eye(11, chunks=3, M=7, k=-6), np.eye(11, M=7, k=-6))
assert_eq(da.eye(6, chunks=3, M=9, k=7), np.eye(6, M=9, k=7))
assert_eq(da.eye(12, chunks=3, M=6, k=-3), np.eye(12, M=6, k=-3))
assert_eq(da.eye(9, chunks=3, dtype=int), np.eye(9, dtype=int))
assert_eq(da.eye(10, chunks=3, dtype=int), np.eye(10, dtype=int))
def test_diag():
v = np.arange(11)
assert_eq(da.diag(v), np.diag(v))
v = da.arange(11, chunks=3)
darr = da.diag(v)
nparr = np.diag(v)
assert_eq(darr, nparr)
assert sorted(da.diag(v).dask) == sorted(da.diag(v).dask)
v = v + v + 3
darr = da.diag(v)
nparr = np.diag(v)
assert_eq(darr, nparr)
v = da.arange(11, chunks=11)
darr = da.diag(v)
nparr = np.diag(v)
assert_eq(darr, nparr)
assert sorted(da.diag(v).dask) == sorted(da.diag(v).dask)
x = np.arange(64).reshape((8, 8))
assert_eq(da.diag(x), np.diag(x))
d = da.from_array(x, chunks=(4, 4))
assert_eq(da.diag(d), np.diag(x))
def test_tril_triu():
A = np.random.randn(20, 20)
for chunk in [5, 4]:
dA = da.from_array(A, (chunk, chunk))
assert np.allclose(da.triu(dA).compute(), np.triu(A))
assert np.allclose(da.tril(dA).compute(), np.tril(A))
for k in [-25, -20, -19, -15, -14, -9, -8, -6, -5, -1,
1, 4, 5, 6, 8, 10, 11, 15, 16, 19, 20, 21]:
assert np.allclose(da.triu(dA, k).compute(), np.triu(A, k))
assert np.allclose(da.tril(dA, k).compute(), np.tril(A, k))
def test_tril_triu_errors():
A = np.random.random_integers(0, 10, (10, 10, 10))
dA = da.from_array(A, chunks=(5, 5, 5))
assert raises(ValueError, lambda: da.triu(dA))
A = np.random.random_integers(0, 10, (30, 35))
dA = da.from_array(A, chunks=(5, 5))
assert raises(NotImplementedError, lambda: da.triu(dA))
def test_atop_names():
x = da.ones(5, chunks=(2,))
y = atop(add, 'i', x, 'i')
assert y.name.startswith('add')
def test_atop_kwargs():
def f(a, b=0):
return a + b
x = da.ones(5, chunks=(2,))
y = atop(f, 'i', x, 'i', b=10, dtype=x.dtype)
assert_eq(y, np.ones(5) + 10)
def test_from_delayed():
v = delayed(np.ones)((5, 3))
x = from_delayed(v, shape=(5, 3), dtype=np.ones(0).dtype)
assert isinstance(x, Array)
assert_eq(x, np.ones((5, 3)))
def test_A_property():
x = da.ones(5, chunks=(2,))
assert x.A is x
def test_copy():
x = da.ones(5, chunks=(2,))
assert x.copy() is x
def test_npartitions():
assert da.ones(5, chunks=(2,)).npartitions == 3
assert da.ones((5, 5), chunks=(2, 3)).npartitions == 6
def test_astype_gh1151():
a = np.arange(5).astype(np.int32)
b = da.from_array(a, (1,))
assert_eq(a.astype(np.int16), b.astype(np.int16))
def test_elemwise_name():
assert (da.ones(5, chunks=2) + 1).name.startswith('add-')
def test_map_blocks_name():
assert da.ones(5, chunks=2).map_blocks(inc).name.startswith('inc-')
def test_from_array_names():
pytest.importorskip('distributed')
from distributed.utils import key_split
x = np.ones(10)
d = da.from_array(x, chunks=2)
names = countby(key_split, d.dask)
assert set(names.values()) == set([1, 5])
| {
"repo_name": "mikegraham/dask",
"path": "dask/array/tests/test_array_core.py",
"copies": "1",
"size": "62339",
"license": "bsd-3-clause",
"hash": -5042705004294711000,
"line_mean": 29.573320255,
"line_max": 92,
"alpha_frac": 0.5210061117,
"autogenerated": false,
"ratio": 2.638017857898523,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.36590239695985227,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import pytest
pytest.importorskip('numpy')
import dask.array as da
from dask.array.utils import assert_eq as _assert_eq
from dask.core import get_deps
from dask.context import set_options
import numpy as np
# Temporary fallback until nanprod is available from numpy itself
try:
from numpy import nanprod
except ImportError: # pragma: no cover
import dask.array.numpy_compat as npcompat
nanprod = npcompat.nanprod
def assert_eq(a, b):
_assert_eq(a, b, equal_nan=True)
def same_keys(a, b):
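    # Two dask graphs built the same way should contain identical keys; map
    # string keys to tuples so mixed key types sort together for comparison.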
def key(k):
if isinstance(k, str):
return (k, -1, -1, -1)
else:
return k
return sorted(a.dask, key=key) == sorted(b.dask, key=key)
def reduction_1d_test(da_func, darr, np_func, narr, use_dtype=True, split_every=True):
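    # Check a dask reduction against its NumPy reference on a 1-D array,
    # covering keepdims, dtype overrides and tree reduction via split_every.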
assert_eq(da_func(darr), np_func(narr))
assert_eq(da_func(darr, keepdims=True), np_func(narr, keepdims=True))
assert same_keys(da_func(darr), da_func(darr))
assert same_keys(da_func(darr, keepdims=True), da_func(darr, keepdims=True))
if use_dtype:
assert_eq(da_func(darr, dtype='f8'), np_func(narr, dtype='f8'))
assert_eq(da_func(darr, dtype='i8'), np_func(narr, dtype='i8'))
assert same_keys(da_func(darr, dtype='i8'), da_func(darr, dtype='i8'))
if split_every:
a1 = da_func(darr, split_every=2)
a2 = da_func(darr, split_every={0: 2})
assert same_keys(a1, a2)
assert_eq(a1, np_func(narr))
assert_eq(a2, np_func(narr))
assert_eq(da_func(darr, keepdims=True, split_every=2),
np_func(narr, keepdims=True))
@pytest.mark.parametrize('dtype', ['f4', 'i4'])
def test_reductions_1D(dtype):
x = np.arange(5).astype(dtype)
a = da.from_array(x, chunks=(2,))
reduction_1d_test(da.sum, a, np.sum, x)
reduction_1d_test(da.prod, a, np.prod, x)
reduction_1d_test(da.mean, a, np.mean, x)
reduction_1d_test(da.var, a, np.var, x)
reduction_1d_test(da.std, a, np.std, x)
reduction_1d_test(da.min, a, np.min, x, False)
reduction_1d_test(da.max, a, np.max, x, False)
reduction_1d_test(da.any, a, np.any, x, False)
reduction_1d_test(da.all, a, np.all, x, False)
reduction_1d_test(da.nansum, a, np.nansum, x)
reduction_1d_test(da.nanprod, a, nanprod, x)
reduction_1d_test(da.nanmean, a, np.mean, x)
reduction_1d_test(da.nanvar, a, np.var, x)
reduction_1d_test(da.nanstd, a, np.std, x)
reduction_1d_test(da.nanmin, a, np.nanmin, x, False)
reduction_1d_test(da.nanmax, a, np.nanmax, x, False)
def reduction_2d_test(da_func, darr, np_func, narr, use_dtype=True,
split_every=True):
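    # Same as reduction_1d_test, but also exercises per-axis, negative-axis
    # and multi-axis reductions on a 2-D array.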
assert_eq(da_func(darr), np_func(narr))
assert_eq(da_func(darr, keepdims=True), np_func(narr, keepdims=True))
assert_eq(da_func(darr, axis=0), np_func(narr, axis=0))
assert_eq(da_func(darr, axis=1), np_func(narr, axis=1))
assert_eq(da_func(darr, axis=-1), np_func(narr, axis=-1))
assert_eq(da_func(darr, axis=-2), np_func(narr, axis=-2))
assert_eq(da_func(darr, axis=1, keepdims=True),
np_func(narr, axis=1, keepdims=True))
assert_eq(da_func(darr, axis=(1, 0)), np_func(narr, axis=(1, 0)))
assert same_keys(da_func(darr, axis=1), da_func(darr, axis=1))
assert same_keys(da_func(darr, axis=(1, 0)), da_func(darr, axis=(1, 0)))
if use_dtype:
assert_eq(da_func(darr, dtype='f8'), np_func(narr, dtype='f8'))
assert_eq(da_func(darr, dtype='i8'), np_func(narr, dtype='i8'))
if split_every:
a1 = da_func(darr, split_every=4)
a2 = da_func(darr, split_every={0: 2, 1: 2})
assert same_keys(a1, a2)
assert_eq(a1, np_func(narr))
assert_eq(a2, np_func(narr))
assert_eq(da_func(darr, keepdims=True, split_every=4),
np_func(narr, keepdims=True))
assert_eq(da_func(darr, axis=0, split_every=2), np_func(narr, axis=0))
assert_eq(da_func(darr, axis=0, keepdims=True, split_every=2),
np_func(narr, axis=0, keepdims=True))
assert_eq(da_func(darr, axis=1, split_every=2), np_func(narr, axis=1))
assert_eq(da_func(darr, axis=1, keepdims=True, split_every=2),
np_func(narr, axis=1, keepdims=True))
def test_reduction_errors():
x = da.ones((5, 5), chunks=(3, 3))
with pytest.raises(ValueError):
x.sum(axis=2)
with pytest.raises(ValueError):
x.sum(axis=-3)
@pytest.mark.slow
@pytest.mark.parametrize('dtype', ['f4', 'i4'])
def test_reductions_2D(dtype):
x = np.arange(1, 122).reshape((11, 11)).astype(dtype)
a = da.from_array(x, chunks=(4, 4))
b = a.sum(keepdims=True)
assert b._keys() == [[(b.name, 0, 0)]]
reduction_2d_test(da.sum, a, np.sum, x)
reduction_2d_test(da.prod, a, np.prod, x)
reduction_2d_test(da.mean, a, np.mean, x)
reduction_2d_test(da.var, a, np.var, x, False) # Difference in dtype algo
reduction_2d_test(da.std, a, np.std, x, False) # Difference in dtype algo
reduction_2d_test(da.min, a, np.min, x, False)
reduction_2d_test(da.max, a, np.max, x, False)
reduction_2d_test(da.any, a, np.any, x, False)
reduction_2d_test(da.all, a, np.all, x, False)
reduction_2d_test(da.nansum, a, np.nansum, x)
reduction_2d_test(da.nanprod, a, nanprod, x)
reduction_2d_test(da.nanmean, a, np.mean, x)
reduction_2d_test(da.nanvar, a, np.nanvar, x, False) # Difference in dtype algo
reduction_2d_test(da.nanstd, a, np.nanstd, x, False) # Difference in dtype algo
reduction_2d_test(da.nanmin, a, np.nanmin, x, False)
reduction_2d_test(da.nanmax, a, np.nanmax, x, False)
@pytest.mark.parametrize(['dfunc', 'func'],
[(da.argmin, np.argmin), (da.argmax, np.argmax),
(da.nanargmin, np.nanargmin),
(da.nanargmax, np.nanargmax)])
def test_arg_reductions(dfunc, func):
x = np.random.random((10, 10, 10))
a = da.from_array(x, chunks=(3, 4, 5))
assert_eq(dfunc(a), func(x))
assert_eq(dfunc(a, 0), func(x, 0))
assert_eq(dfunc(a, 1), func(x, 1))
assert_eq(dfunc(a, 2), func(x, 2))
with set_options(split_every=2):
assert_eq(dfunc(a), func(x))
assert_eq(dfunc(a, 0), func(x, 0))
assert_eq(dfunc(a, 1), func(x, 1))
assert_eq(dfunc(a, 2), func(x, 2))
pytest.raises(ValueError, lambda: dfunc(a, 3))
pytest.raises(TypeError, lambda: dfunc(a, (0, 1)))
x2 = np.arange(10)
a2 = da.from_array(x2, chunks=3)
assert_eq(dfunc(a2), func(x2))
assert_eq(dfunc(a2, 0), func(x2, 0))
assert_eq(dfunc(a2, 0, split_every=2), func(x2, 0))
@pytest.mark.parametrize(['dfunc', 'func'],
[(da.nanargmin, np.nanargmin),
(da.nanargmax, np.nanargmax)])
def test_nanarg_reductions(dfunc, func):
x = np.random.random((10, 10, 10))
x[5] = np.nan
a = da.from_array(x, chunks=(3, 4, 5))
assert_eq(dfunc(a), func(x))
assert_eq(dfunc(a, 0), func(x, 0))
with pytest.raises(ValueError):
dfunc(a, 1).compute()
with pytest.raises(ValueError):
dfunc(a, 2).compute()
x[:] = np.nan
a = da.from_array(x, chunks=(3, 4, 5))
with pytest.raises(ValueError):
dfunc(a).compute()
def test_reductions_2D_nans():
# chunks are a mix of some/all/no NaNs
x = np.full((4, 4), np.nan)
x[:2, :2] = np.array([[1, 2], [3, 4]])
x[2, 2] = 5
x[3, 3] = 6
a = da.from_array(x, chunks=(2, 2))
reduction_2d_test(da.sum, a, np.sum, x, False, False)
reduction_2d_test(da.prod, a, np.prod, x, False, False)
reduction_2d_test(da.mean, a, np.mean, x, False, False)
reduction_2d_test(da.var, a, np.var, x, False, False)
reduction_2d_test(da.std, a, np.std, x, False, False)
reduction_2d_test(da.min, a, np.min, x, False, False)
reduction_2d_test(da.max, a, np.max, x, False, False)
reduction_2d_test(da.any, a, np.any, x, False, False)
reduction_2d_test(da.all, a, np.all, x, False, False)
reduction_2d_test(da.nansum, a, np.nansum, x, False, False)
reduction_2d_test(da.nanprod, a, nanprod, x, False, False)
reduction_2d_test(da.nanmean, a, np.nanmean, x, False, False)
reduction_2d_test(da.nanvar, a, np.nanvar, x, False, False)
reduction_2d_test(da.nanstd, a, np.nanstd, x, False, False)
reduction_2d_test(da.nanmin, a, np.nanmin, x, False, False)
reduction_2d_test(da.nanmax, a, np.nanmax, x, False, False)
assert_eq(da.argmax(a), np.argmax(x))
assert_eq(da.argmin(a), np.argmin(x))
assert_eq(da.nanargmax(a), np.nanargmax(x))
assert_eq(da.nanargmin(a), np.nanargmin(x))
assert_eq(da.argmax(a, axis=0), np.argmax(x, axis=0))
assert_eq(da.argmin(a, axis=0), np.argmin(x, axis=0))
assert_eq(da.nanargmax(a, axis=0), np.nanargmax(x, axis=0))
assert_eq(da.nanargmin(a, axis=0), np.nanargmin(x, axis=0))
assert_eq(da.argmax(a, axis=1), np.argmax(x, axis=1))
assert_eq(da.argmin(a, axis=1), np.argmin(x, axis=1))
assert_eq(da.nanargmax(a, axis=1), np.nanargmax(x, axis=1))
assert_eq(da.nanargmin(a, axis=1), np.nanargmin(x, axis=1))
def test_moment():
def moment(x, n, axis=None):
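        # Reference n-th central moment: mean of (x - mean(x))**n along `axis`.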
return (((x - x.mean(axis=axis, keepdims=True)) ** n).sum(axis=axis) /
np.ones_like(x).sum(axis=axis))
# Poorly conditioned
x = np.array([1., 2., 3.] * 10).reshape((3, 10)) + 1e8
a = da.from_array(x, chunks=5)
assert_eq(a.moment(2), moment(x, 2))
assert_eq(a.moment(3), moment(x, 3))
assert_eq(a.moment(4), moment(x, 4))
x = np.arange(1, 122).reshape((11, 11)).astype('f8')
a = da.from_array(x, chunks=(4, 4))
assert_eq(a.moment(4, axis=1), moment(x, 4, axis=1))
assert_eq(a.moment(4, axis=(1, 0)), moment(x, 4, axis=(1, 0)))
# Tree reduction
assert_eq(a.moment(order=4, split_every=4), moment(x, 4))
assert_eq(a.moment(order=4, axis=0, split_every=4), moment(x, 4, axis=0))
assert_eq(a.moment(order=4, axis=1, split_every=4), moment(x, 4, axis=1))
def test_reductions_with_negative_axes():
x = np.random.random((4, 4, 4))
a = da.from_array(x, chunks=2)
assert_eq(a.argmin(axis=-1), x.argmin(axis=-1))
assert_eq(a.argmin(axis=-1, split_every=2), x.argmin(axis=-1))
assert_eq(a.sum(axis=-1), x.sum(axis=-1))
assert_eq(a.sum(axis=(0, -1)), x.sum(axis=(0, -1)))
def test_nan():
x = np.array([[1, np.nan, 3, 4],
[5, 6, 7, np.nan],
[9, 10, 11, 12]])
d = da.from_array(x, chunks=(2, 2))
assert_eq(np.nansum(x), da.nansum(d))
assert_eq(np.nansum(x, axis=0), da.nansum(d, axis=0))
assert_eq(np.nanmean(x, axis=1), da.nanmean(d, axis=1))
assert_eq(np.nanmin(x, axis=1), da.nanmin(d, axis=1))
assert_eq(np.nanmax(x, axis=(0, 1)), da.nanmax(d, axis=(0, 1)))
assert_eq(np.nanvar(x), da.nanvar(d))
assert_eq(np.nanstd(x, axis=0), da.nanstd(d, axis=0))
assert_eq(np.nanargmin(x, axis=0), da.nanargmin(d, axis=0))
assert_eq(np.nanargmax(x, axis=0), da.nanargmax(d, axis=0))
assert_eq(nanprod(x), da.nanprod(d))
def test_0d_array():
x = da.mean(da.ones(4, chunks=4), axis=0).compute()
y = np.mean(np.ones(4))
assert type(x) == type(y)
x = da.sum(da.zeros(4, chunks=1)).compute()
y = np.sum(np.zeros(4))
assert type(x) == type(y)
def test_reduction_on_scalar():
x = da.from_array(np.array(1.0), chunks=())
assert (x == x).all()
def test_reductions_with_empty_array():
dx1 = da.ones((10, 0, 5), chunks=4)
x1 = dx1.compute()
dx2 = da.ones((0, 0, 0), chunks=4)
x2 = dx2.compute()
for dx, x in [(dx1, x1), (dx2, x2)]:
assert_eq(dx.mean(), x.mean())
assert_eq(dx.mean(axis=0), x.mean(axis=0))
assert_eq(dx.mean(axis=1), x.mean(axis=1))
assert_eq(dx.mean(axis=2), x.mean(axis=2))
def assert_max_deps(x, n, eq=True):
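    # The largest dependency count of any task in the graph is the fan-in of
    # the tree reduction; check it equals n (eq=True) or is at most n (eq=False).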
dependencies, dependents = get_deps(x.dask)
if eq:
assert max(map(len, dependencies.values())) == n
else:
assert max(map(len, dependencies.values())) <= n
def test_tree_reduce_depth():
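    # split_every limits how many input blocks each intermediate reduction
    # task combines per axis, so the expected fan-in is the per-axis product.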
# 2D
x = da.from_array(np.arange(242).reshape((11, 22)), chunks=(3, 4))
thresh = {0: 2, 1: 3}
assert_max_deps(x.sum(split_every=thresh), 2 * 3)
assert_max_deps(x.sum(axis=0, split_every=thresh), 2)
assert_max_deps(x.sum(axis=1, split_every=thresh), 3)
assert_max_deps(x.sum(split_every=20), 20, False)
assert_max_deps(x.sum(axis=0, split_every=20), 4)
assert_max_deps(x.sum(axis=1, split_every=20), 6)
# 3D
x = da.from_array(np.arange(11 * 22 * 29).reshape((11, 22, 29)), chunks=(3, 4, 5))
thresh = {0: 2, 1: 3, 2: 4}
assert_max_deps(x.sum(split_every=thresh), 2 * 3 * 4)
assert_max_deps(x.sum(axis=0, split_every=thresh), 2)
assert_max_deps(x.sum(axis=1, split_every=thresh), 3)
assert_max_deps(x.sum(axis=2, split_every=thresh), 4)
assert_max_deps(x.sum(axis=(0, 1), split_every=thresh), 2 * 3)
assert_max_deps(x.sum(axis=(0, 2), split_every=thresh), 2 * 4)
assert_max_deps(x.sum(axis=(1, 2), split_every=thresh), 3 * 4)
assert_max_deps(x.sum(split_every=20), 20, False)
assert_max_deps(x.sum(axis=0, split_every=20), 4)
assert_max_deps(x.sum(axis=1, split_every=20), 6)
assert_max_deps(x.sum(axis=2, split_every=20), 6)
assert_max_deps(x.sum(axis=(0, 1), split_every=20), 20, False)
assert_max_deps(x.sum(axis=(0, 2), split_every=20), 20, False)
assert_max_deps(x.sum(axis=(1, 2), split_every=20), 20, False)
assert_max_deps(x.sum(axis=(0, 1), split_every=40), 4 * 6)
assert_max_deps(x.sum(axis=(0, 2), split_every=40), 4 * 6)
assert_max_deps(x.sum(axis=(1, 2), split_every=40), 6 * 6)
def test_tree_reduce_set_options():
x = da.from_array(np.arange(242).reshape((11, 22)), chunks=(3, 4))
with set_options(split_every={0: 2, 1: 3}):
assert_max_deps(x.sum(), 2 * 3)
assert_max_deps(x.sum(axis=0), 2)
def test_reduction_names():
x = da.ones(5, chunks=(2,))
assert x.sum().name.startswith('sum')
assert 'max' in x.max().name.split('-')[0]
assert x.var().name.startswith('var')
assert x.all().name.startswith('all')
assert any(k[0].startswith('nansum') for k in da.nansum(x).dask)
assert x.mean().name.startswith('mean')
| {
"repo_name": "mraspaud/dask",
"path": "dask/array/tests/test_reductions.py",
"copies": "1",
"size": "14323",
"license": "bsd-3-clause",
"hash": 1115947319936954000,
"line_mean": 37.6064690027,
"line_max": 86,
"alpha_frac": 0.603295399,
"autogenerated": false,
"ratio": 2.573302191879267,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8673480139959999,
"avg_score": 0.0006234901838535607,
"num_lines": 371
} |
from __future__ import absolute_import, division, print_function
import pytest
pytest.importorskip('numpy')
import dask.array as da
from dask.utils import ignoring
from dask.array.reductions import arg_aggregate
import numpy as np
def eq(a, b):
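    # Compute dask arrays first, then compare with np.allclose for array
    # results and plain equality otherwise.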
if isinstance(a, da.Array):
a = a.compute()
if isinstance(b, da.Array):
b = b.compute()
if isinstance(a, (np.generic, np.ndarray)):
return np.allclose(a, b)
else:
return a == b
def test_arg_reduction():
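    # Each pair holds per-chunk (min values, local argmin indices); the
    # aggregate picks the winning chunk per position and offsets its local
    # index by that chunk's start (0 and 100 here).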
pairs = [([4, 3, 5], [10, 11, 12]),
([3, 5, 1], [1, 2, 3])]
result = arg_aggregate(np.min, np.argmin, (100, 100), pairs)
assert eq(result, np.array([101, 11, 103]))
def reduction_1d_test(da_func, darr, np_func, narr, use_dtype=True):
assert eq(da_func(darr), np_func(narr))
assert eq(da_func(darr, keepdims=True), np_func(narr, keepdims=True))
if use_dtype:
assert eq(da_func(darr, dtype='f8'), np_func(narr, dtype='f8'))
assert eq(da_func(darr, dtype='i8'), np_func(narr, dtype='i8'))
def test_reductions_1D_float():
x = np.arange(5).astype('f4')
a = da.from_array(x, chunks=(2,))
reduction_1d_test(da.sum, a, np.sum, x)
reduction_1d_test(da.prod, a, np.prod, x)
reduction_1d_test(da.mean, a, np.mean, x)
reduction_1d_test(da.var, a, np.var, x)
reduction_1d_test(da.std, a, np.std, x)
reduction_1d_test(da.min, a, np.min, x, False)
reduction_1d_test(da.max, a, np.max, x, False)
reduction_1d_test(da.any, a, np.any, x, False)
reduction_1d_test(da.all, a, np.all, x, False)
reduction_1d_test(da.nansum, a, np.nansum, x)
with ignoring(AttributeError):
reduction_1d_test(da.nanprod, a, np.nanprod, x)
reduction_1d_test(da.nanmean, a, np.mean, x)
reduction_1d_test(da.nanvar, a, np.var, x)
reduction_1d_test(da.nanstd, a, np.std, x)
reduction_1d_test(da.nanmin, a, np.nanmin, x, False)
reduction_1d_test(da.nanmax, a, np.nanmax, x, False)
assert eq(da.argmax(a, axis=0), np.argmax(x, axis=0))
assert eq(da.argmin(a, axis=0), np.argmin(x, axis=0))
assert eq(da.nanargmax(a, axis=0), np.nanargmax(x, axis=0))
assert eq(da.nanargmin(a, axis=0), np.nanargmin(x, axis=0))
def test_reductions_1D_int():
x = np.arange(5).astype('i4')
a = da.from_array(x, chunks=(2,))
reduction_1d_test(da.sum, a, np.sum, x)
reduction_1d_test(da.prod, a, np.prod, x)
reduction_1d_test(da.mean, a, np.mean, x)
reduction_1d_test(da.var, a, np.var, x)
reduction_1d_test(da.std, a, np.std, x)
reduction_1d_test(da.min, a, np.min, x, False)
reduction_1d_test(da.max, a, np.max, x, False)
reduction_1d_test(da.any, a, np.any, x, False)
reduction_1d_test(da.all, a, np.all, x, False)
reduction_1d_test(da.nansum, a, np.nansum, x)
with ignoring(AttributeError):
reduction_1d_test(da.nanprod, a, np.nanprod, x)
reduction_1d_test(da.nanmean, a, np.mean, x)
reduction_1d_test(da.nanvar, a, np.var, x)
reduction_1d_test(da.nanstd, a, np.std, x)
reduction_1d_test(da.nanmin, a, np.nanmin, x, False)
reduction_1d_test(da.nanmax, a, np.nanmax, x, False)
assert eq(da.argmax(a, axis=0), np.argmax(x, axis=0))
assert eq(da.argmin(a, axis=0), np.argmin(x, axis=0))
assert eq(da.nanargmax(a, axis=0), np.nanargmax(x, axis=0))
assert eq(da.nanargmin(a, axis=0), np.nanargmin(x, axis=0))
def reduction_2d_test(da_func, darr, np_func, narr, use_dtype=True):
assert eq(da_func(darr), np_func(narr))
assert eq(da_func(darr, keepdims=True), np_func(narr, keepdims=True))
assert eq(da_func(darr, axis=0), np_func(narr, axis=0))
assert eq(da_func(darr, axis=1), np_func(narr, axis=1))
assert eq(da_func(darr, axis=1, keepdims=True),
np_func(narr, axis=1, keepdims=True))
assert eq(da_func(darr, axis=(1, 0)), np_func(narr, axis=(1, 0)))
if use_dtype:
assert eq(da_func(darr, dtype='f8'), np_func(narr, dtype='f8'))
assert eq(da_func(darr, dtype='i8'), np_func(narr, dtype='i8'))
def test_reductions_2D_float():
x = np.arange(1, 122).reshape((11, 11)).astype('f4')
a = da.from_array(x, chunks=(4, 4))
b = a.sum(keepdims=True)
assert b._keys() == [[(b.name, 0, 0)]]
reduction_2d_test(da.sum, a, np.sum, x)
reduction_2d_test(da.prod, a, np.prod, x)
reduction_2d_test(da.mean, a, np.mean, x)
reduction_2d_test(da.var, a, np.var, x, False) # Difference in dtype algo
reduction_2d_test(da.std, a, np.std, x, False) # Difference in dtype algo
reduction_2d_test(da.min, a, np.min, x, False)
reduction_2d_test(da.max, a, np.max, x, False)
reduction_2d_test(da.any, a, np.any, x, False)
reduction_2d_test(da.all, a, np.all, x, False)
reduction_2d_test(da.nansum, a, np.nansum, x)
with ignoring(AttributeError):
reduction_2d_test(da.nanprod, a, np.nanprod, x)
reduction_2d_test(da.nanmean, a, np.mean, x)
reduction_2d_test(da.nanvar, a, np.nanvar, x, False) # Difference in dtype algo
reduction_2d_test(da.nanstd, a, np.nanstd, x, False) # Difference in dtype algo
reduction_2d_test(da.nanmin, a, np.nanmin, x, False)
reduction_2d_test(da.nanmax, a, np.nanmax, x, False)
assert eq(da.argmax(a, axis=0), np.argmax(x, axis=0))
assert eq(da.argmin(a, axis=0), np.argmin(x, axis=0))
assert eq(da.nanargmax(a, axis=0), np.nanargmax(x, axis=0))
assert eq(da.nanargmin(a, axis=0), np.nanargmin(x, axis=0))
def test_reductions_2D_int():
x = np.arange(1, 122).reshape((11, 11)).astype('i4')
a = da.from_array(x, chunks=(4, 4))
reduction_2d_test(da.sum, a, np.sum, x)
reduction_2d_test(da.prod, a, np.prod, x)
reduction_2d_test(da.mean, a, np.mean, x)
reduction_2d_test(da.var, a, np.var, x, False) # Difference in dtype algo
reduction_2d_test(da.std, a, np.std, x, False) # Difference in dtype algo
reduction_2d_test(da.min, a, np.min, x, False)
reduction_2d_test(da.max, a, np.max, x, False)
reduction_2d_test(da.any, a, np.any, x, False)
reduction_2d_test(da.all, a, np.all, x, False)
reduction_2d_test(da.nansum, a, np.nansum, x)
with ignoring(AttributeError):
reduction_2d_test(da.nanprod, a, np.nanprod, x)
reduction_2d_test(da.nanmean, a, np.mean, x)
reduction_2d_test(da.nanvar, a, np.nanvar, x, False) # Difference in dtype algo
reduction_2d_test(da.nanstd, a, np.nanstd, x, False) # Difference in dtype algo
reduction_2d_test(da.nanmin, a, np.nanmin, x, False)
reduction_2d_test(da.nanmax, a, np.nanmax, x, False)
assert eq(da.argmax(a, axis=0), np.argmax(x, axis=0))
assert eq(da.argmin(a, axis=0), np.argmin(x, axis=0))
assert eq(da.nanargmax(a, axis=0), np.nanargmax(x, axis=0))
assert eq(da.nanargmin(a, axis=0), np.nanargmin(x, axis=0))
assert eq(da.argmax(a, axis=1), np.argmax(x, axis=1))
assert eq(da.argmin(a, axis=1), np.argmin(x, axis=1))
assert eq(da.nanargmax(a, axis=1), np.nanargmax(x, axis=1))
assert eq(da.nanargmin(a, axis=1), np.nanargmin(x, axis=1))
def test_moment():
def moment(x, n, axis=None):
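        # Reference n-th central moment: mean of (x - mean(x))**n along `axis`.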
        return (((x - x.mean(axis=axis, keepdims=True)) ** n).sum(axis=axis) /
                np.ones_like(x).sum(axis=axis))
# Poorly conditioned
x = np.array([1., 2., 3.]*10).reshape((3, 10)) + 1e8
a = da.from_array(x, chunks=5)
assert eq(a.moment(2), moment(x, 2))
assert eq(a.moment(3), moment(x, 3))
assert eq(a.moment(4), moment(x, 4))
x = np.arange(1, 122).reshape((11, 11)).astype('f8')
a = da.from_array(x, chunks=(4, 4))
assert eq(a.moment(4, axis=1), moment(x, 4, axis=1))
assert eq(a.moment(4, axis=(1, 0)), moment(x, 4, axis=(1, 0)))
def test_reductions_with_negative_axes():
x = np.random.random((4, 4, 4))
a = da.from_array(x, chunks=2)
assert eq(a.argmin(axis=-1), x.argmin(axis=-1))
assert eq(a.sum(axis=-1), x.sum(axis=-1))
assert eq(a.sum(axis=(0, -1)), x.sum(axis=(0, -1)))
def test_nan():
x = np.array([[1, np.nan, 3, 4],
[5, 6, 7, np.nan],
[9, 10, 11, 12]])
d = da.from_array(x, chunks=(2, 2))
assert eq(np.nansum(x), da.nansum(d))
assert eq(np.nansum(x, axis=0), da.nansum(d, axis=0))
assert eq(np.nanmean(x, axis=1), da.nanmean(d, axis=1))
assert eq(np.nanmin(x, axis=1), da.nanmin(d, axis=1))
assert eq(np.nanmax(x, axis=(0, 1)), da.nanmax(d, axis=(0, 1)))
assert eq(np.nanvar(x), da.nanvar(d))
assert eq(np.nanstd(x, axis=0), da.nanstd(d, axis=0))
assert eq(np.nanargmin(x, axis=0), da.nanargmin(d, axis=0))
assert eq(np.nanargmax(x, axis=0), da.nanargmax(d, axis=0))
with ignoring(AttributeError):
assert eq(np.nanprod(x), da.nanprod(d))
def test_0d_array():
x = da.mean(da.ones(4, chunks=4), axis=0).compute()
y = np.mean(np.ones(4))
assert type(x) == type(y)
x = da.sum(da.zeros(4, chunks=1)).compute()
y = np.sum(np.zeros(4))
assert type(x) == type(y)
| {
"repo_name": "simudream/dask",
"path": "dask/array/tests/test_reductions.py",
"copies": "4",
"size": "8980",
"license": "bsd-3-clause",
"hash": 1479019833814786000,
"line_mean": 38.2139737991,
"line_max": 84,
"alpha_frac": 0.6190423163,
"autogenerated": false,
"ratio": 2.5196408529741863,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.000844717914745765,
"num_lines": 229
} |
from __future__ import absolute_import, division, print_function
import pytest
pytest.importorskip('numpy')
import numpy as np
from dask.array.chunk import coarsen, keepdims_wrapper
import dask.array as da
def test_keepdims_wrapper_no_axis():
def summer(a, axis=None):
return a.sum(axis=axis)
summer_wrapped = keepdims_wrapper(summer)
assert summer_wrapped != summer
assert summer_wrapped == keepdims_wrapper(summer_wrapped)
a = np.arange(24).reshape(1, 2, 3, 4)
r = summer(a)
rw = summer_wrapped(a, keepdims=True)
rwf = summer_wrapped(a, keepdims=False)
assert r.ndim == 0
assert r.shape == tuple()
assert r == 276
assert rw.ndim == 4
assert rw.shape == (1, 1, 1, 1)
assert (rw == 276).all()
assert rwf.ndim == 0
assert rwf.shape == tuple()
assert rwf == 276
def test_keepdims_wrapper_one_axis():
def summer(a, axis=None):
return a.sum(axis=axis)
summer_wrapped = keepdims_wrapper(summer)
assert summer_wrapped != summer
assert summer_wrapped == keepdims_wrapper(summer_wrapped)
a = np.arange(24).reshape(1, 2, 3, 4)
r = summer(a, axis=2)
rw = summer_wrapped(a, axis=2, keepdims=True)
rwf = summer_wrapped(a, axis=2, keepdims=False)
assert r.ndim == 3
assert r.shape == (1, 2, 4)
assert (r == np.array([[[12, 15, 18, 21], [48, 51, 54, 57]]])).all()
assert rw.ndim == 4
assert rw.shape == (1, 2, 1, 4)
assert (rw == np.array([[[[12, 15, 18, 21]], [[48, 51, 54, 57]]]])).all()
assert rwf.ndim == 3
assert rwf.shape == (1, 2, 4)
assert (rwf == np.array([[[12, 15, 18, 21], [48, 51, 54, 57]]])).all()
def test_keepdims_wrapper_two_axes():
def summer(a, axis=None):
return a.sum(axis=axis)
summer_wrapped = keepdims_wrapper(summer)
assert summer_wrapped != summer
assert summer_wrapped == keepdims_wrapper(summer_wrapped)
a = np.arange(24).reshape(1, 2, 3, 4)
r = summer(a, axis=(1, 3))
rw = summer_wrapped(a, axis=(1, 3), keepdims=True)
rwf = summer_wrapped(a, axis=(1, 3), keepdims=False)
assert r.ndim == 2
assert r.shape == (1, 3)
assert (r == np.array([[60, 92, 124]])).all()
assert rw.ndim == 4
assert rw.shape == (1, 1, 3, 1)
assert (rw == np.array([[[[60], [92], [124]]]])).all()
assert rwf.ndim == 2
assert rwf.shape == (1, 3)
assert (rwf == np.array([[60, 92, 124]])).all()
def test_coarsen():
x = np.random.randint(10, size=(24, 24))
y = coarsen(np.sum, x, {0: 2, 1: 4})
assert y.shape == (12, 6)
assert y[0, 0] == np.sum(x[:2, :4])
"""
def test_coarsen_on_uneven_shape():
x = np.random.randint(10, size=(23, 24))
y = coarsen(np.sum, x, {0: 2, 1: 4})
assert y.shape == (12, 6)
assert y[0, 0] == np.sum(x[:2, :4])
assert eq(y[11, :], x[23, :])
"""
def test_integer_input():
assert da.zeros((4, 6), chunks=2).rechunk(3).chunks == ((3, 1), (3, 3))
| {
"repo_name": "cpcloud/dask",
"path": "dask/array/tests/test_chunk.py",
"copies": "6",
"size": "2956",
"license": "bsd-3-clause",
"hash": 2271109899541926100,
"line_mean": 24.9298245614,
"line_max": 77,
"alpha_frac": 0.5818673884,
"autogenerated": false,
"ratio": 2.8098859315589353,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6391753319958936,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import pytest
pytest.importorskip('numpy')
import numpy as np
from dask.array.chunk import coarsen, keepdims_wrapper, trim
import dask.array as da
def test_keepdims_wrapper_no_axis():
def summer(a, axis=None):
return a.sum(axis=axis)
summer_wrapped = keepdims_wrapper(summer)
assert summer_wrapped != summer
assert summer_wrapped == keepdims_wrapper(summer_wrapped)
a = np.arange(24).reshape(1, 2, 3, 4)
r = summer(a)
rw = summer_wrapped(a, keepdims=True)
rwf = summer_wrapped(a, keepdims=False)
assert r.ndim == 0
assert r.shape == tuple()
assert r == 276
assert rw.ndim == 4
assert rw.shape == (1, 1, 1, 1)
assert (rw == 276).all()
assert rwf.ndim == 0
assert rwf.shape == tuple()
assert rwf == 276
def test_keepdims_wrapper_one_axis():
def summer(a, axis=None):
return a.sum(axis=axis)
summer_wrapped = keepdims_wrapper(summer)
assert summer_wrapped != summer
assert summer_wrapped == keepdims_wrapper(summer_wrapped)
a = np.arange(24).reshape(1, 2, 3, 4)
r = summer(a, axis=2)
rw = summer_wrapped(a, axis=2, keepdims=True)
rwf = summer_wrapped(a, axis=2, keepdims=False)
assert r.ndim == 3
assert r.shape == (1, 2, 4)
assert (r == np.array([[[12, 15, 18, 21], [48, 51, 54, 57]]])).all()
assert rw.ndim == 4
assert rw.shape == (1, 2, 1, 4)
assert (rw == np.array([[[[12, 15, 18, 21]], [[48, 51, 54, 57]]]])).all()
assert rwf.ndim == 3
assert rwf.shape == (1, 2, 4)
assert (rwf == np.array([[[12, 15, 18, 21], [48, 51, 54, 57]]])).all()
def test_keepdims_wrapper_two_axes():
def summer(a, axis=None):
return a.sum(axis=axis)
summer_wrapped = keepdims_wrapper(summer)
assert summer_wrapped != summer
assert summer_wrapped == keepdims_wrapper(summer_wrapped)
a = np.arange(24).reshape(1, 2, 3, 4)
r = summer(a, axis=(1, 3))
rw = summer_wrapped(a, axis=(1, 3), keepdims=True)
rwf = summer_wrapped(a, axis=(1, 3), keepdims=False)
assert r.ndim == 2
assert r.shape == (1, 3)
assert (r == np.array([[60, 92, 124]])).all()
assert rw.ndim == 4
assert rw.shape == (1, 1, 3, 1)
assert (rw == np.array([[[[60], [92], [124]]]])).all()
assert rwf.ndim == 2
assert rwf.shape == (1, 3)
assert (rwf == np.array([[60, 92, 124]])).all()
def eq(a, b):
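    # Element-wise comparison that reduces array results with .all().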
c = a == b
if isinstance(c, np.ndarray):
c = c.all()
return c
def test_coarsen():
x = np.random.randint(10, size=(24, 24))
y = coarsen(np.sum, x, {0: 2, 1: 4})
assert y.shape == (12, 6)
assert y[0, 0] == np.sum(x[:2, :4])
"""
def test_coarsen_on_uneven_shape():
x = np.random.randint(10, size=(23, 24))
y = coarsen(np.sum, x, {0: 2, 1: 4})
assert y.shape == (12, 6)
assert y[0, 0] == np.sum(x[:2, :4])
assert eq(y[11, :], x[23, :])
"""
def test_integer_input():
assert da.zeros((4, 6), chunks=2).rechunk(3).chunks == ((3, 1), (3, 3))
| {
"repo_name": "jayhetee/dask",
"path": "dask/array/tests/test_chunk.py",
"copies": "12",
"size": "3060",
"license": "bsd-3-clause",
"hash": 2776134822795118000,
"line_mean": 24.2892561983,
"line_max": 77,
"alpha_frac": 0.5777777778,
"autogenerated": false,
"ratio": 2.809917355371901,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0009148073408015365,
"num_lines": 121
} |
from __future__ import absolute_import, division, print_function
import pytest
pytest.importorskip('sqlalchemy')
import os
from decimal import Decimal
from functools import partial
from textwrap import dedent
import datashape
from datashape import (
date_,
datetime_,
discover,
dshape,
int_,
int64,
float32,
float64,
string,
var,
Option,
R,
)
from datashape.util.testing import assert_dshape_equal
import numpy as np
import pandas as pd
import sqlalchemy as sa
from odo import convert, append, resource, into, odo, chunks
from odo.backends.sql import (
dshape_to_table, create_from_datashape, dshape_to_alchemy,
discover_sqlalchemy_selectable
)
from odo.utils import tmpfile, raises
from six import string_types
def test_resource():
sql = resource('sqlite:///:memory:::mytable',
dshape='var * {x: int, y: int}')
assert isinstance(sql, sa.Table)
assert sql.name == 'mytable'
assert isinstance(sql.bind, sa.engine.base.Engine)
assert set(c.name for c in sql.c) == set(['x', 'y'])
def test_append_and_convert_round_trip():
engine = sa.create_engine('sqlite:///:memory:')
metadata = sa.MetaData(engine)
t = sa.Table('bank', metadata,
sa.Column('name', sa.String, primary_key=True),
sa.Column('balance', sa.Integer))
t.create()
data = [('Alice', 1), ('Bob', 2)]
append(t, data)
assert convert(list, t) == data
def test_plus_must_have_text():
with pytest.raises(NotImplementedError):
resource('redshift+://user:pass@host:1234/db')
def test_resource_on_file():
with tmpfile('.db') as fn:
uri = 'sqlite:///' + fn
sql = resource(uri, 'foo', dshape='var * {x: int, y: int}')
assert isinstance(sql, sa.Table)
with tmpfile('.db') as fn:
uri = 'sqlite:///' + fn
sql = resource(uri + '::' + 'foo', dshape='var * {x: int, y: int}')
assert isinstance(sql, sa.Table)
def test_resource_to_engine():
with tmpfile('.db') as fn:
uri = 'sqlite:///' + fn
r = resource(uri)
assert isinstance(r, sa.engine.Engine)
assert r.dialect.name == 'sqlite'
def test_resource_to_engine_to_create_tables():
with tmpfile('.db') as fn:
uri = 'sqlite:///' + fn
ds = datashape.dshape('{mytable: var * {name: string, amt: int}}')
r = resource(uri, dshape=ds)
assert isinstance(r, sa.engine.Engine)
assert r.dialect.name == 'sqlite'
assert discover(r) == ds
def test_discover():
assert discover(sa.String()) == datashape.string
metadata = sa.MetaData()
s = sa.Table('accounts', metadata,
sa.Column('name', sa.String),
sa.Column('amount', sa.Integer),
sa.Column('timestamp', sa.DateTime, primary_key=True))
ds = dshape('var * {name: ?string, amount: ?int32, timestamp: datetime}')
assert_dshape_equal(discover(s), ds)
for name in ds.measure.names:
assert isinstance(name, string_types)
def test_discover_numeric_column():
assert discover(sa.String()) == datashape.string
metadata = sa.MetaData()
s = sa.Table('name', metadata,
sa.Column('name', sa.types.NUMERIC),)
assert discover(s)
def test_discover_null_columns():
assert dshape(discover(sa.Column('name', sa.String, nullable=True))) == \
dshape('{name: ?string}')
assert dshape(discover(sa.Column('name', sa.String, nullable=False))) == \
dshape('{name: string}')
def test_discover_selectable():
t = resource('sqlite:///:memory:::mytable',
dshape='var * {x: int, y: int}')
q = sa.select([t.c.x]).limit(5)
assert discover(q) == dshape('var * {x: int}')
def test_discover_fixed_length_string():
t = resource('sqlite:///:memory:::mytable',
dshape='var * {x: string[30]}')
assert discover(t) == dshape('var * {x: string[30]}')
def single_table_engine():
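    # Build an in-memory SQLite engine with a two-column 'accounts' table
    # shared by several tests below.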
engine = sa.create_engine('sqlite:///:memory:')
metadata = sa.MetaData(engine)
t = sa.Table('accounts', metadata,
sa.Column('name', sa.String),
sa.Column('amount', sa.Integer))
t.create()
return engine, t
def test_select_to_iterator():
engine, t = single_table_engine()
append(t, [('Alice', 100), ('Bob', 200)])
sel = sa.select([t.c.amount + 1])
assert convert(list, sel) == [(101,), (201,)]
assert convert(list, sel, dshape=dshape('var * int')) == [101, 201]
sel2 = sa.select([sa.sql.func.sum(t.c.amount)])
assert convert(int, sel2, dshape=dshape('int')) == 300
sel3 = sa.select([t])
result = convert(list, sel3, dshape=discover(t))
assert type(result[0]) is tuple
for res in result:
assert isinstance(res[0], string_types)
def test_discovery_engine():
engine, t = single_table_engine()
assert discover(engine, 'accounts') == discover(t)
assert str(discover(engine)) == str(discover({'accounts': t}))
def test_discovery_metadata():
engine, t = single_table_engine()
metadata = t.metadata
assert str(discover(metadata)) == str(discover({'accounts': t}))
def test_discover_views():
engine, t = single_table_engine()
metadata = t.metadata
with engine.connect() as conn:
conn.execute('''CREATE VIEW myview AS
SELECT name, amount
FROM accounts
WHERE amount > 0''')
assert str(discover(metadata)) == str(
discover({'accounts': t, 'myview': t}))
def test_extend_empty():
engine, t = single_table_engine()
assert not convert(list, t)
append(t, [])
assert not convert(list, t)
def test_dshape_to_alchemy():
assert dshape_to_alchemy('string') == sa.Text
assert isinstance(dshape_to_alchemy('string[40]'), sa.String)
assert not isinstance(dshape_to_alchemy('string["ascii"]'), sa.Unicode)
assert isinstance(dshape_to_alchemy('string[40, "U8"]'), sa.Unicode)
assert dshape_to_alchemy('string[40]').length == 40
assert dshape_to_alchemy('float32') == sa.REAL
assert dshape_to_alchemy('float64') == sa.FLOAT
def test_dshape_to_table():
t = dshape_to_table('bank', '{name: string, amount: int}')
assert isinstance(t, sa.Table)
assert t.name == 'bank'
assert [c.name for c in t.c] == ['name', 'amount']
td_freqs = list(zip(['D', 'h', 'm', 's', 'ms', 'us', 'ns'],
[0, 0, 0, 0, 3, 6, 9],
[9, 0, 0, 0, 0, 0, 0]))
@pytest.mark.parametrize(['freq', 'secp', 'dayp'], td_freqs)
def test_dshape_to_table_with_timedelta(freq, secp, dayp):
ds = '{name: string, amount: int, duration: timedelta[unit="%s"]}' % freq
t = dshape_to_table('td_bank', ds)
assert isinstance(t, sa.Table)
assert t.name == 'td_bank'
assert isinstance(t.c.duration.type, sa.types.Interval)
assert t.c.duration.type.second_precision == secp
assert t.c.duration.type.day_precision == dayp
@pytest.mark.xfail(raises=NotImplementedError)
def test_dshape_to_table_month():
ds = '{name: string, amount: int, duration: timedelta[unit="M"]}'
dshape_to_table('td_bank', ds)
@pytest.mark.xfail(raises=NotImplementedError)
def test_dshape_to_table_year():
ds = '{name: string, amount: int, duration: timedelta[unit="Y"]}'
dshape_to_table('td_bank', ds)
@pytest.mark.parametrize('freq', ['D', 's', 'ms', 'us', 'ns'])
def test_timedelta_sql_discovery(freq):
ds = '{name: string, amount: int, duration: timedelta[unit="%s"]}' % freq
t = dshape_to_table('td_bank', ds)
assert discover(t).measure['duration'] == datashape.TimeDelta(freq)
@pytest.mark.parametrize('freq', ['h', 'm'])
def test_timedelta_sql_discovery_hour_minute(freq):
# these always compare equal to a seconds timedelta, because no data loss
# will occur with this heuristic. this implies that the sa.Table was
# constructed with day_precision == 0 and second_precision == 0
ds = '{name: string, amount: int, duration: timedelta[unit="%s"]}' % freq
t = dshape_to_table('td_bank', ds)
assert discover(t).measure['duration'] == datashape.TimeDelta('s')
prec = {
's': 0,
'ms': 3,
'us': 6,
'ns': 9
}
@pytest.mark.parametrize('freq', list(prec.keys()))
def test_discover_postgres_intervals(freq):
precision = prec.get(freq)
typ = sa.dialects.postgresql.base.INTERVAL(precision=precision)
t = sa.Table('t', sa.MetaData(), sa.Column('dur', typ))
assert discover(t) == dshape('var * {dur: ?timedelta[unit="%s"]}' % freq)
# Between PostgreSQL and Oracle, only Oracle supports day-precision intervals.
@pytest.mark.parametrize('freq', ['D'] + list(prec.keys()))
def test_discover_oracle_intervals(freq):
typ = sa.dialects.oracle.base.INTERVAL(day_precision={'D': 9}.get(freq),
second_precision=prec.get(freq, 0))
t = sa.Table('t', sa.MetaData(), sa.Column('dur', typ))
assert discover(t) == dshape('var * {dur: ?timedelta[unit="%s"]}' % freq)
@pytest.mark.parametrize(
'typ,dtype', (
(sa.DATETIME, datetime_),
(sa.TIMESTAMP, datetime_),
(sa.FLOAT, float64),
(sa.DATE, date_),
(sa.BIGINT, int64),
(sa.INTEGER, int_),
(sa.BIGINT, int64),
(sa.types.NullType, string),
(sa.REAL, float32),
(sa.Float, float64),
(sa.Float(precision=8), float32),
(sa.Float(precision=24), float32),
(sa.Float(precision=42), float64),
(sa.Float(precision=53), float64),
),
)
def test_types(typ, dtype):
expected = var * R['value': Option(dtype)]
t = sa.Table('t', sa.MetaData(), sa.Column('value', typ))
assert_dshape_equal(discover(t), expected)
@pytest.mark.parametrize(
'typ', (
sa.Float(precision=-1),
sa.Float(precision=0),
sa.Float(precision=54)
)
)
def test_unsupported_precision(typ):
t = sa.Table('t', sa.MetaData(), sa.Column('value', typ))
with pytest.raises(ValueError) as err:
discover(t)
assert str(err.value) == "{} is not a supported precision".format(
typ.precision)
def test_mssql_types():
typ = sa.dialects.mssql.BIT()
t = sa.Table('t', sa.MetaData(), sa.Column('bit', typ))
assert_dshape_equal(discover(t), dshape('var * {bit: ?bool}'))
typ = sa.dialects.mssql.DATETIMEOFFSET()
t = sa.Table('t', sa.MetaData(), sa.Column('dt', typ))
assert_dshape_equal(discover(t), dshape('var * {dt: ?string}'))
typ = sa.dialects.mssql.MONEY()
t = sa.Table('t', sa.MetaData(), sa.Column('money', typ))
assert_dshape_equal(discover(t), dshape('var * {money: ?float64}'))
typ = sa.dialects.mssql.SMALLMONEY()
t = sa.Table('t', sa.MetaData(), sa.Column('money', typ))
assert_dshape_equal(discover(t), dshape('var * {money: ?float32}'))
typ = sa.dialects.mssql.UNIQUEIDENTIFIER()
t = sa.Table('t', sa.MetaData(), sa.Column('uuid', typ))
assert_dshape_equal(discover(t), dshape('var * {uuid: ?string}'))
def test_create_from_datashape():
engine = sa.create_engine('sqlite:///:memory:')
ds = dshape('''{bank: var * {name: string, amount: int},
points: var * {x: int, y: int}}''')
engine = create_from_datashape(engine, ds)
assert discover(engine) == ds
def test_into_table_iterator():
engine = sa.create_engine('sqlite:///:memory:')
metadata = sa.MetaData(engine)
t = dshape_to_table('points', '{x: int, y: int}', metadata=metadata)
t.create()
data = [(1, 1), (2, 4), (3, 9)]
append(t, data)
assert convert(list, t) == data
assert isinstance(convert(list, t)[0], tuple)
t2 = dshape_to_table('points2', '{x: int, y: int}', metadata=metadata)
t2.create()
data2 = [{'x': 1, 'y': 1}, {'x': 2, 'y': 4}, {'x': 3, 'y': 9}]
append(t2, data2)
assert convert(list, t2) == data
def test_sql_field_names_disagree_on_order():
r = resource('sqlite:///:memory:::tb', dshape=dshape('{x: int, y: int}'))
append(r, [(1, 2), (10, 20)], dshape=dshape('{y: int, x: int}'))
assert convert(set, r) == set([(2, 1), (20, 10)])
def test_sql_field_names_disagree_on_names():
r = resource('sqlite:///:memory:::tb', dshape=dshape('{x: int, y: int}'))
assert raises(Exception, lambda: append(r, [(1, 2), (10, 20)],
dshape=dshape('{x: int, z: int}')))
def test_resource_on_dialects():
assert (resource.dispatch('mysql://foo') is
resource.dispatch('mysql+pymysql://foo'))
assert (resource.dispatch('never-before-seen-sql://foo') is
resource.dispatch('mysql://foo'))
@pytest.yield_fixture
def sqlite_file():
try:
yield 'sqlite:///db.db'
finally:
os.remove('db.db')
def test_append_from_select(sqlite_file):
# we can't test in memory here because that creates two independent
# databases
raw = np.array([(200.0, 'Glenn'),
(314.14, 'Hope'),
(235.43, 'Bob')], dtype=[('amount', 'float64'),
('name', 'U5')])
raw2 = np.array([(800.0, 'Joe'),
(914.14, 'Alice'),
(1235.43, 'Ratso')], dtype=[('amount', 'float64'),
('name', 'U5')])
t = into('%s::t' % sqlite_file, raw)
s = into('%s::s' % sqlite_file, raw2)
t = append(t, s.select())
result = into(list, t)
expected = np.concatenate((raw, raw2)).tolist()
assert result == expected
def test_append_from_table():
# we can't test in memory here because that creates two independent
# databases
with tmpfile('db') as fn:
raw = np.array([(200.0, 'Glenn'),
(314.14, 'Hope'),
(235.43, 'Bob')], dtype=[('amount', 'float64'),
('name', 'U5')])
raw2 = np.array([(800.0, 'Joe'),
(914.14, 'Alice'),
(1235.43, 'Ratso')], dtype=[('amount', 'float64'),
('name', 'U5')])
t = into('sqlite:///%s::t' % fn, raw)
s = into('sqlite:///%s::s' % fn, raw2)
t = append(t, s)
result = odo(t, list)
expected = np.concatenate((raw, raw2)).tolist()
assert result == expected
def test_engine_metadata_caching():
with tmpfile('db') as fn:
engine = resource('sqlite:///' + fn)
a = resource(
'sqlite:///' + fn + '::a', dshape=dshape('var * {x: int}'))
b = resource(
'sqlite:///' + fn + '::b', dshape=dshape('var * {y: int}'))
assert a.metadata is b.metadata
assert engine is a.bind is b.bind
def test_copy_one_table_to_a_foreign_engine():
data = [(1, 1), (2, 4), (3, 9)]
ds = dshape('var * {x: int, y: int}')
with tmpfile('db') as fn1:
with tmpfile('db') as fn2:
src = into('sqlite:///%s::points' % fn1, data, dshape=ds)
tgt = into('sqlite:///%s::points' % fn2,
sa.select([src]), dshape=ds)
assert into(set, src) == into(set, tgt)
assert into(set, data) == into(set, tgt)
def test_select_to_series_retains_name():
data = [(1, 1), (2, 4), (3, 9)]
ds = dshape('var * {x: int, y: int}')
with tmpfile('db') as fn1:
points = odo(data, 'sqlite:///%s::points' % fn1, dshape=ds)
sel = sa.select([(points.c.x + 1).label('x')])
series = odo(sel, pd.Series)
assert series.name == 'x'
assert odo(series, list) == [x + 1 for x, _ in data]
def test_empty_select_to_empty_frame():
# data = [(1, 1), (2, 4), (3, 9)]
ds = dshape('var * {x: int, y: int}')
with tmpfile('db') as fn1:
points = resource('sqlite:///%s::points' % fn1, dshape=ds)
sel = sa.select([points])
df = odo(sel, pd.DataFrame)
assert df.empty
assert df.columns.tolist() == ['x', 'y']
def test_discover_foreign_keys():
with tmpfile('db') as fn:
products = resource('sqlite:///%s::products' % fn,
dshape="""
var * {
product_no: int32,
name: ?string,
price: ?float64
}
""",
primary_key=['product_no'])
expected = dshape("""var * {
order_id: int32,
product_no: map[int32, {
product_no: int32,
name: ?string,
price: ?float64
}],
quantity: ?int32
}""")
orders = resource('sqlite:///%s::orders' % fn,
dshape=expected,
foreign_keys=dict(product_no=products.c.product_no))
result = discover(orders)
assert result == expected
def test_invalid_foreign_keys():
with tmpfile('db') as fn:
expected = dshape("""var * {
order_id: int32,
product_no: map[int32, {
product_no: int32,
name: ?string,
price: ?float64
}],
quantity: ?int32
}""")
with pytest.raises(TypeError):
resource('sqlite:///%s::orders' % fn, dshape=expected)
def test_foreign_keys_auto_construct():
with tmpfile('db') as fn:
products = resource('sqlite:///%s::products' % fn,
dshape="""
var * {
product_no: int32,
name: ?string,
price: ?float64
}
""",
primary_key=['product_no'])
ds = dshape("""var * {
order_id: int32,
product_no: map[int32, T],
quantity: ?int32
}""")
orders = resource('sqlite:///%s::orders' % fn, dshape=ds,
foreign_keys=dict(product_no=products.c.product_no),
primary_key=['order_id'])
assert discover(orders) == dshape("""
var * {
order_id: int32,
product_no: map[int32, {
product_no: int32,
name: ?string,
price: ?float64
}],
quantity: ?int32
}
""")
def test_foreign_keys_bad_field():
with tmpfile('db') as fn:
expected = dshape("""var * {
order_id: int32,
product_no: int64,
quantity: ?int32
}""")
with pytest.raises(TypeError):
resource('sqlite:///%s::orders' % fn, dshape=expected,
foreign_keys=dict(foo='products.product_no'))
@pytest.fixture
def recursive_fkey():
return sa.Table(
'employees',
sa.MetaData(),
sa.Column('eid', sa.BIGINT, primary_key=True),
sa.Column('name', sa.TEXT),
sa.Column('mgr_eid', sa.BIGINT, sa.ForeignKey('employees.eid'),
nullable=False)
)
def test_recursive_foreign_key(recursive_fkey):
expected = dshape("""
var * {
eid: int64,
name: ?string,
mgr_eid: map[int64, {eid: int64, name: ?string, mgr_eid: int64}]
}
""")
assert discover(recursive_fkey) == expected
def test_create_recursive_foreign_key():
with tmpfile('.db') as fn:
t = resource('sqlite:///%s::employees' % fn,
dshape="""
var * {
eid: int64,
name: ?string,
mgr_eid: map[int64, T]
}""", foreign_keys=dict(mgr_eid='employees.eid'),
primary_key=['eid'])
result = discover(t)
expected = dshape("""
var * {
eid: int64,
name: ?string,
mgr_eid: map[int64, {eid: int64, name: ?string, mgr_eid: int64}]
}
""")
assert result == expected
def test_compound_primary_key():
with tmpfile('db') as fn:
products = resource('sqlite:///%s::products' % fn,
dshape="""
var * {
product_no: int32,
product_sku: string,
name: ?string,
price: ?float64
}
""",
primary_key=['product_no', 'product_sku'])
assert len(products.primary_key) == 2
assert (products.primary_key.columns['product_no'] is
products.c.product_no)
assert (products.primary_key.columns['product_sku'] is
products.c.product_sku)
def test_compound_primary_key_with_fkey():
with tmpfile('db') as fn:
products = resource('sqlite:///%s::products' % fn,
dshape="""
var * {
product_no: int32,
product_sku: string,
name: ?string,
price: ?float64
}
""",
primary_key=['product_no', 'product_sku'])
ds = dshape("""var * {
order_id: int32,
product_no: map[int32, T],
product_sku: map[int32, U],
quantity: ?int32
}""")
orders = resource('sqlite:///%s::orders' % fn, dshape=ds,
primary_key=['order_id'],
foreign_keys={
'product_no': products.c.product_no,
'product_sku': products.c.product_sku
})
assert discover(orders) == dshape(
"""var * {
order_id: int32,
product_no: map[int32, {product_no: int32, product_sku: string, name: ?string, price: ?float64}],
product_sku: map[int32, {product_no: int32, product_sku: string, name: ?string, price: ?float64}],
quantity: ?int32
}
"""
)
def test_compound_primary_key_with_single_reference():
with tmpfile('db') as fn:
products = resource('sqlite:///%s::products' % fn,
dshape="""
var * {
product_no: int32,
product_sku: string,
name: ?string,
price: ?float64
}
""", primary_key=['product_no', 'product_sku'])
# TODO: should this fail everywhere? e.g., this fails in postgres, but
# not in sqlite because postgres doesn't allow partial foreign keys
# might be best to let the backend handle this
ds = dshape("""var * {
order_id: int32,
product_no: map[int32, T],
quantity: ?int32
}""")
orders = resource('sqlite:///%s::orders' % fn, dshape=ds,
foreign_keys=dict(product_no=products.c.product_no),
primary_key=['order_id'])
assert discover(orders) == dshape(
"""var * {
order_id: int32,
product_no: map[int32, {product_no: int32, product_sku: string, name: ?string, price: ?float64}],
quantity: ?int32
}
"""
)
def test_foreign_keys_as_compound_primary_key():
with tmpfile('db') as fn:
suppliers = resource(
'sqlite:///%s::suppliers' % fn,
dshape='var * {id: int64, name: string}',
primary_key=['id']
)
parts = resource(
'sqlite:///%s::parts' % fn,
dshape='var * {id: int64, name: string, region: string}',
primary_key=['id']
)
suppart = resource(
'sqlite:///%s::suppart' % fn,
dshape='var * {supp_id: map[int64, T], part_id: map[int64, U]}',
foreign_keys={
'supp_id': suppliers.c.id,
'part_id': parts.c.id
},
primary_key=['supp_id', 'part_id']
)
expected = dshape("""
var * {
supp_id: map[int64, {id: int64, name: string}],
part_id: map[int64, {id: int64, name: string, region: string}]
}
""")
result = discover(suppart)
assert result == expected
def test_append_chunks():
tbl = resource('sqlite:///:memory:::test', dshape='var * {a: int, b: int}')
res = odo(
chunks(np.ndarray)((
np.array([[0, 1], [2, 3]]),
np.array([[4, 5], [6, 7]]),
)),
tbl,
)
assert res is tbl
assert (
odo(tbl, np.ndarray) == np.array(
[(0, 1),
(2, 3),
(4, 5),
(6, 7)],
dtype=[('a', '<i4'), ('b', '<i4')],
)
).all()
def test_append_array_without_column_names():
with pytest.raises(TypeError):
odo(np.zeros((2, 2)), 'sqlite:///:memory:::test')
def test_numeric_create():
tbl = resource(
'sqlite:///:memory:::test',
dshape='var * {a: ?decimal[11, 2], b: decimal[10, 6]}'
)
assert tbl.c.a.nullable
assert not tbl.c.b.nullable
assert isinstance(tbl.c.a.type, sa.NUMERIC)
assert isinstance(tbl.c.b.type, sa.NUMERIC)
def test_numeric_append():
tbl = resource(
'sqlite:///:memory:::test',
dshape='var * {a: decimal[11, 2], b: ?decimal[10, 6]}'
)
data = [(1.0, 2.0), (2.0, 3.0)]
tbl = odo(data, tbl)
assert odo(tbl, list) == list(map(
lambda row: tuple(map(Decimal, row)),
tbl.select().execute().fetchall()
))
def test_discover_float_and_real_core_types():
assert discover(sa.FLOAT()) == float64
assert discover(sa.REAL()) == float32
def test_string_dshape_doc_example():
x = np.zeros((10, 2))
with tmpfile('.db') as fn:
t = odo(
x, 'sqlite:///%s::x' % fn, dshape='var * {a: float64, b: float64}'
)
assert all(row == (0, 0) for row in t.select().execute().fetchall())
def test_decimal_conversion():
data = [(1.0,), (2.0,)]
with tmpfile('.db') as fn:
t = odo(data, 'sqlite:///%s::x' % fn, dshape='var * {x: decimal[11, 2]}')
result = odo(sa.select([sa.func.sum(t.c.x)]), Decimal)
assert result == sum(Decimal(r[0]) for r in data)
def test_append_empty_iterator_returns_table():
with tmpfile('.db') as fn:
t = resource('sqlite:///%s::x' % fn, dshape='var * {a: int32}')
assert odo(iter([]), t) is t
def test_pass_non_hashable_arg_to_create_engine():
with tmpfile('.db') as fn:
r = partial(
resource,
'sqlite:///%s::t' % fn,
connect_args={},
dshape='var * {a: int32}',
)
assert r() is r()
s = partial(
resource,
'sqlite:///:memory:::t',
connect_args={},
dshape='var * {a: int32}',
)
assert s() is not s()
def test_nullable_foreign_key():
"""Test for issue #554"""
engine = sa.create_engine('sqlite://')
metadata = sa.MetaData(bind=engine)
T1 = sa.Table(
'NullableForeignKeyDemo',
metadata,
sa.Column('pkid', sa.Integer, primary_key=True),
sa.Column('label_id', sa.Integer,
sa.ForeignKey("ForeignKeyLabels.pkid"), nullable=True),
)
T2 = sa.Table(
'ForeignKeyLabels',
metadata,
sa.Column('pkid', sa.Integer, primary_key=True),
sa.Column('label', sa.String),
)
metadata.create_all()
x = np.arange(10)
records1 = [
{'pkid': idx, 'label_id': int(value)}
for idx, value
in enumerate(x[::-1])
]
records1[-1]['label_id'] = None # foreign-key is nullable!
records2 = [
{'pkid': int(pkid), 'label': chr(pkid + 65)}
for pkid in x
]
with engine.connect() as conn:
conn.execute(T1.insert(), records1)
conn.execute(T2.insert(), records2)
ds = discover_sqlalchemy_selectable(T1)
# The nullable key should be an Option instance
assert isinstance(ds.measure['label_id'].key, Option)
dtype = [('pkid', np.int32), ('label_id', object)]
expected = np.rec.fromarrays([x, x[::-1]], dtype=dtype)
expected = pd.DataFrame(expected)
expected.iloc[-1, -1] = None
actual = odo(T1, pd.DataFrame)
assert actual.equals(expected)
def test_transaction():
with tmpfile('.db') as fn:
rsc = resource('sqlite:///%s::table' % fn, dshape='var * {a: int}')
data = [(1,), (2,), (3,)]
conn_1 = rsc.bind.connect()
conn_2 = rsc.bind.connect()
trans_1 = conn_1.begin()
conn_2.begin()
odo(data, rsc, bind=conn_1)
# inside the transaction the write should be there
assert odo(rsc, list, bind=conn_1) == data
# outside of a transaction or in a different transaction the write is not
# there
assert odo(rsc, list) == odo(rsc, list, bind=conn_2) == []
trans_1.commit()
# now the data should appear outside the transaction
assert odo(rsc, list) == odo(rsc, list, bind=conn_2) == data
| {
"repo_name": "quantopian/odo",
"path": "odo/backends/tests/test_sql.py",
"copies": "1",
"size": "30276",
"license": "bsd-3-clause",
"hash": 8226027051478876000,
"line_mean": 32.161007667,
"line_max": 114,
"alpha_frac": 0.5120557537,
"autogenerated": false,
"ratio": 3.644637053087757,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46566928067877567,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import pytest
pytest.importorskip('sqlalchemy')
import os
from decimal import Decimal
import numpy as np
import pandas as pd
import sqlalchemy as sa
import toolz as tz
from datashape import discover, dshape, float32, float64
import datashape
from odo.backends.sql import (
dshape_to_table, create_from_datashape, dshape_to_alchemy
)
from odo.utils import tmpfile, raises
from odo import convert, append, resource, discover, into, odo, chunks
def test_resource():
sql = resource('sqlite:///:memory:::mytable',
dshape='var * {x: int, y: int}')
assert isinstance(sql, sa.Table)
assert sql.name == 'mytable'
assert isinstance(sql.bind, sa.engine.base.Engine)
assert set(c.name for c in sql.c) == set(['x', 'y'])
def test_append_and_convert_round_trip():
engine = sa.create_engine('sqlite:///:memory:')
metadata = sa.MetaData(engine)
t = sa.Table('bank', metadata,
sa.Column('name', sa.String, primary_key=True),
sa.Column('balance', sa.Integer))
t.create()
data = [('Alice', 1), ('Bob', 2)]
append(t, data)
assert convert(list, t) == data
def test_plus_must_have_text():
with pytest.raises(NotImplementedError):
resource('redshift+://user:pass@host:1234/db')
def test_resource_on_file():
with tmpfile('.db') as fn:
uri = 'sqlite:///' + fn
sql = resource(uri, 'foo', dshape='var * {x: int, y: int}')
assert isinstance(sql, sa.Table)
with tmpfile('.db') as fn:
uri = 'sqlite:///' + fn
sql = resource(uri + '::' + 'foo', dshape='var * {x: int, y: int}')
assert isinstance(sql, sa.Table)
def test_resource_to_engine():
with tmpfile('.db') as fn:
uri = 'sqlite:///' + fn
r = resource(uri)
assert isinstance(r, sa.engine.Engine)
assert r.dialect.name == 'sqlite'
def test_resource_to_engine_to_create_tables():
with tmpfile('.db') as fn:
uri = 'sqlite:///' + fn
ds = datashape.dshape('{mytable: var * {name: string, amt: int}}')
r = resource(uri, dshape=ds)
assert isinstance(r, sa.engine.Engine)
assert r.dialect.name == 'sqlite'
assert discover(r) == ds
def test_discover():
assert discover(sa.String()) == datashape.string
metadata = sa.MetaData()
s = sa.Table('accounts', metadata,
sa.Column('name', sa.String),
sa.Column('amount', sa.Integer),
sa.Column('timestamp', sa.DateTime, primary_key=True))
assert discover(s) == \
dshape('var * {name: ?string, amount: ?int32, timestamp: datetime}')
def test_discover_numeric_column():
assert discover(sa.String()) == datashape.string
metadata = sa.MetaData()
s = sa.Table('name', metadata,
sa.Column('name', sa.types.NUMERIC),)
assert discover(s)
def test_discover_null_columns():
assert dshape(discover(sa.Column('name', sa.String, nullable=True))) == \
dshape('{name: ?string}')
assert dshape(discover(sa.Column('name', sa.String, nullable=False))) == \
dshape('{name: string}')
def test_discover_selectable():
t = resource('sqlite:///:memory:::mytable',
dshape='var * {x: int, y: int}')
q = sa.select([t.c.x]).limit(5)
assert discover(q) == dshape('var * {x: int}')
def test_discover_fixed_length_string():
t = resource('sqlite:///:memory:::mytable',
dshape='var * {x: string[30]}')
assert discover(t) == dshape('var * {x: string[30]}')
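# Helper shared by several tests below: builds an in-memory SQLite engine with
# a two-column 'accounts' table.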
def single_table_engine():
engine = sa.create_engine('sqlite:///:memory:')
metadata = sa.MetaData(engine)
t = sa.Table('accounts', metadata,
sa.Column('name', sa.String),
sa.Column('amount', sa.Integer))
t.create()
return engine, t
def test_select_to_iterator():
engine, t = single_table_engine()
append(t, [('Alice', 100), ('Bob', 200)])
sel = sa.select([t.c.amount + 1])
assert convert(list, sel) == [(101,), (201,)]
assert convert(list, sel, dshape=dshape('var * int')) == [101, 201]
sel2 = sa.select([sa.sql.func.sum(t.c.amount)])
assert convert(int, sel2, dshape=dshape('int')) == 300
sel3 = sa.select([t])
result = convert(list, sel3, dshape=discover(t))
assert type(result[0]) is tuple
def test_discovery_engine():
engine, t = single_table_engine()
assert discover(engine, 'accounts') == discover(t)
assert str(discover(engine)) == str(discover({'accounts': t}))
def test_discovery_metadata():
engine, t = single_table_engine()
metadata = t.metadata
assert str(discover(metadata)) == str(discover({'accounts': t}))
def test_discover_views():
engine, t = single_table_engine()
metadata = t.metadata
with engine.connect() as conn:
conn.execute('''CREATE VIEW myview AS
SELECT name, amount
FROM accounts
WHERE amount > 0''')
assert str(discover(metadata)) == str(
discover({'accounts': t, 'myview': t}))
def test_extend_empty():
engine, t = single_table_engine()
assert not convert(list, t)
append(t, [])
assert not convert(list, t)
def test_dshape_to_alchemy():
assert dshape_to_alchemy('string') == sa.Text
assert isinstance(dshape_to_alchemy('string[40]'), sa.String)
assert not isinstance(dshape_to_alchemy('string["ascii"]'), sa.Unicode)
assert isinstance(dshape_to_alchemy('string[40, "U8"]'), sa.Unicode)
assert dshape_to_alchemy('string[40]').length == 40
assert dshape_to_alchemy('float32') == sa.REAL
assert dshape_to_alchemy('float64') == sa.FLOAT
def test_dshape_to_table():
t = dshape_to_table('bank', '{name: string, amount: int}')
assert isinstance(t, sa.Table)
assert t.name == 'bank'
assert [c.name for c in t.c] == ['name', 'amount']
td_freqs = list(zip(['D', 'h', 'm', 's', 'ms', 'us', 'ns'],
[0, 0, 0, 0, 3, 6, 9],
[9, 0, 0, 0, 0, 0, 0]))
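# Each td_freqs triple is (timedelta unit, expected second_precision, expected
# day_precision) for the generated sa.types.Interval column, e.g. 'us' maps to
# 6 fractional-second digits and 'D' to a day_precision of 9.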
@pytest.mark.parametrize(['freq', 'secp', 'dayp'], td_freqs)
def test_dshape_to_table_with_timedelta(freq, secp, dayp):
ds = '{name: string, amount: int, duration: timedelta[unit="%s"]}' % freq
t = dshape_to_table('td_bank', ds)
assert isinstance(t, sa.Table)
assert t.name == 'td_bank'
assert isinstance(t.c.duration.type, sa.types.Interval)
assert t.c.duration.type.second_precision == secp
assert t.c.duration.type.day_precision == dayp
@pytest.mark.xfail(raises=NotImplementedError)
def test_dshape_to_table_month():
ds = '{name: string, amount: int, duration: timedelta[unit="M"]}'
dshape_to_table('td_bank', ds)
@pytest.mark.xfail(raises=NotImplementedError)
def test_dshape_to_table_year():
ds = '{name: string, amount: int, duration: timedelta[unit="Y"]}'
dshape_to_table('td_bank', ds)
@pytest.mark.parametrize('freq', ['D', 's', 'ms', 'us', 'ns'])
def test_timedelta_sql_discovery(freq):
ds = '{name: string, amount: int, duration: timedelta[unit="%s"]}' % freq
t = dshape_to_table('td_bank', ds)
assert discover(t).measure['duration'] == datashape.TimeDelta(freq)
@pytest.mark.parametrize('freq', ['h', 'm'])
def test_timedelta_sql_discovery_hour_minute(freq):
# these always compare equal to a seconds timedelta, because no data loss
# will occur with this heuristic. this implies that the sa.Table was
# constructed with day_precision == 0 and second_precision == 0
ds = '{name: string, amount: int, duration: timedelta[unit="%s"]}' % freq
t = dshape_to_table('td_bank', ds)
assert discover(t).measure['duration'] == datashape.TimeDelta('s')
prec = {
's': 0,
'ms': 3,
'us': 6,
'ns': 9
}
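# prec maps a timedelta unit to its fractional-second digits ('s' -> 0,
# 'ms' -> 3, 'us' -> 6, 'ns' -> 9), used as the INTERVAL precision in the
# dialect-specific discovery tests below.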
@pytest.mark.parametrize('freq', list(prec.keys()))
def test_discover_postgres_intervals(freq):
precision = prec.get(freq)
typ = sa.dialects.postgresql.base.INTERVAL(precision=precision)
t = sa.Table('t', sa.MetaData(), sa.Column('dur', typ))
assert discover(t) == dshape('var * {dur: ?timedelta[unit="%s"]}' % freq)
# between postgresql and oracle, only oracle has support for day intervals
@pytest.mark.parametrize('freq', ['D'] + list(prec.keys()))
def test_discover_oracle_intervals(freq):
typ = sa.dialects.oracle.base.INTERVAL(day_precision={'D': 9}.get(freq),
second_precision=prec.get(freq, 0))
t = sa.Table('t', sa.MetaData(), sa.Column('dur', typ))
assert discover(t) == dshape('var * {dur: ?timedelta[unit="%s"]}' % freq)
def test_create_from_datashape():
engine = sa.create_engine('sqlite:///:memory:')
ds = dshape('''{bank: var * {name: string, amount: int},
points: var * {x: int, y: int}}''')
engine = create_from_datashape(engine, ds)
assert discover(engine) == ds
def test_into_table_iterator():
engine = sa.create_engine('sqlite:///:memory:')
metadata = sa.MetaData(engine)
t = dshape_to_table('points', '{x: int, y: int}', metadata=metadata)
t.create()
data = [(1, 1), (2, 4), (3, 9)]
append(t, data)
assert convert(list, t) == data
assert isinstance(convert(list, t)[0], tuple)
t2 = dshape_to_table('points2', '{x: int, y: int}', metadata=metadata)
t2.create()
data2 = [{'x': 1, 'y': 1}, {'x': 2, 'y': 4}, {'x': 3, 'y': 9}]
append(t2, data2)
assert convert(list, t2) == data
def test_sql_field_names_disagree_on_order():
r = resource('sqlite:///:memory:::tb', dshape=dshape('{x: int, y: int}'))
append(r, [(1, 2), (10, 20)], dshape=dshape('{y: int, x: int}'))
assert convert(set, r) == set([(2, 1), (20, 10)])
def test_sql_field_names_disagree_on_names():
r = resource('sqlite:///:memory:::tb', dshape=dshape('{x: int, y: int}'))
assert raises(Exception, lambda: append(r, [(1, 2), (10, 20)],
dshape=dshape('{x: int, z: int}')))
def test_resource_on_dialects():
assert (resource.dispatch('mysql://foo') is
resource.dispatch('mysql+pymysql://foo'))
assert (resource.dispatch('never-before-seen-sql://foo') is
resource.dispatch('mysql://foo'))
@pytest.yield_fixture
def sqlite_file():
try:
yield 'sqlite:///db.db'
finally:
os.remove('db.db')
def test_append_from_select(sqlite_file):
# we can't test in memory here because that creates two independent
# databases
raw = np.array([(200.0, 'Glenn'),
(314.14, 'Hope'),
(235.43, 'Bob')], dtype=[('amount', 'float64'),
('name', 'U5')])
raw2 = np.array([(800.0, 'Joe'),
(914.14, 'Alice'),
(1235.43, 'Ratso')], dtype=[('amount', 'float64'),
('name', 'U5')])
t = into('%s::t' % sqlite_file, raw)
s = into('%s::s' % sqlite_file, raw2)
t = append(t, s.select())
result = into(list, t)
expected = np.concatenate((raw, raw2)).tolist()
assert result == expected
def test_append_from_table():
# we can't test in memory here because that creates two independent
# databases
with tmpfile('db') as fn:
raw = np.array([(200.0, 'Glenn'),
(314.14, 'Hope'),
(235.43, 'Bob')], dtype=[('amount', 'float64'),
('name', 'U5')])
raw2 = np.array([(800.0, 'Joe'),
(914.14, 'Alice'),
(1235.43, 'Ratso')], dtype=[('amount', 'float64'),
('name', 'U5')])
t = into('sqlite:///%s::t' % fn, raw)
s = into('sqlite:///%s::s' % fn, raw2)
t = append(t, s)
result = odo(t, list)
expected = np.concatenate((raw, raw2)).tolist()
assert result == expected
def test_engine_metadata_caching():
with tmpfile('db') as fn:
engine = resource('sqlite:///' + fn)
a = resource(
'sqlite:///' + fn + '::a', dshape=dshape('var * {x: int}'))
b = resource(
'sqlite:///' + fn + '::b', dshape=dshape('var * {y: int}'))
assert a.metadata is not b.metadata
assert engine is not a.bind
assert engine is not b.bind
def test_copy_one_table_to_a_foreign_engine():
data = [(1, 1), (2, 4), (3, 9)]
ds = dshape('var * {x: int, y: int}')
with tmpfile('db') as fn1:
with tmpfile('db') as fn2:
src = into('sqlite:///%s::points' % fn1, data, dshape=ds)
tgt = into('sqlite:///%s::points' % fn2,
sa.select([src]), dshape=ds)
assert into(set, src) == into(set, tgt)
assert into(set, data) == into(set, tgt)
def test_select_to_series_retains_name():
data = [(1, 1), (2, 4), (3, 9)]
ds = dshape('var * {x: int, y: int}')
with tmpfile('db') as fn1:
points = odo(data, 'sqlite:///%s::points' % fn1, dshape=ds)
sel = sa.select([(points.c.x + 1).label('x')])
series = odo(sel, pd.Series)
assert series.name == 'x'
assert odo(series, list) == [x + 1 for x, _ in data]
def test_empty_select_to_empty_frame():
# data = [(1, 1), (2, 4), (3, 9)]
ds = dshape('var * {x: int, y: int}')
with tmpfile('db') as fn1:
points = resource('sqlite:///%s::points' % fn1, dshape=ds)
sel = sa.select([points])
df = odo(sel, pd.DataFrame)
assert df.empty
assert df.columns.tolist() == ['x', 'y']
def test_discover_foreign_keys():
with tmpfile('db') as fn:
products = resource('sqlite:///%s::products' % fn,
dshape="""
var * {
product_no: int32,
name: ?string,
price: ?float64
}
""",
primary_key=['product_no'])
expected = dshape("""var * {
order_id: int32,
product_no: map[int32, {
product_no: int32,
name: ?string,
price: ?float64
}],
quantity: ?int32
}""")
orders = resource('sqlite:///%s::orders' % fn,
dshape=expected,
foreign_keys=dict(product_no=products.c.product_no))
result = discover(orders)
assert result == expected
def test_invalid_foreign_keys():
with tmpfile('db') as fn:
expected = dshape("""var * {
order_id: int32,
product_no: map[int32, {
product_no: int32,
name: ?string,
price: ?float64
}],
quantity: ?int32
}""")
with pytest.raises(TypeError):
resource('sqlite:///%s::orders' % fn, dshape=expected)
def test_foreign_keys_auto_construct():
with tmpfile('db') as fn:
products = resource('sqlite:///%s::products' % fn,
dshape="""
var * {
product_no: int32,
name: ?string,
price: ?float64
}
""",
primary_key=['product_no'])
ds = dshape("""var * {
order_id: int32,
product_no: map[int32, T],
quantity: ?int32
}""")
orders = resource('sqlite:///%s::orders' % fn, dshape=ds,
foreign_keys=dict(product_no=products.c.product_no),
primary_key=['order_id'])
assert discover(orders) == dshape("""
var * {
order_id: int32,
product_no: map[int32, {
product_no: int32,
name: ?string,
price: ?float64
}],
quantity: ?int32
}
""")
def test_foreign_keys_bad_field():
with tmpfile('db') as fn:
expected = dshape("""var * {
order_id: int32,
product_no: int64,
quantity: ?int32
}""")
with pytest.raises(TypeError):
resource('sqlite:///%s::orders' % fn, dshape=expected,
foreign_keys=dict(foo='products.product_no'))
@pytest.fixture
def recursive_fkey():
return sa.Table(
'employees',
sa.MetaData(),
sa.Column('eid', sa.BIGINT, primary_key=True),
sa.Column('name', sa.TEXT),
sa.Column('mgr_eid', sa.BIGINT, sa.ForeignKey('employees.eid'),
nullable=False)
)
def test_recursive_foreign_key(recursive_fkey):
expected = dshape("""
var * {
eid: int64,
name: ?string,
mgr_eid: map[int64, {eid: int64, name: ?string, mgr_eid: int64}]
}
""")
assert discover(recursive_fkey) == expected
def test_create_recursive_foreign_key():
with tmpfile('.db') as fn:
t = resource('sqlite:///%s::employees' % fn,
dshape="""
var * {
eid: int64,
name: ?string,
mgr_eid: map[int64, T]
}""", foreign_keys=dict(mgr_eid='employees.eid'),
primary_key=['eid'])
result = discover(t)
expected = dshape("""
var * {
eid: int64,
name: ?string,
mgr_eid: map[int64, {eid: int64, name: ?string, mgr_eid: int64}]
}
""")
assert result == expected
def test_compound_primary_key():
with tmpfile('db') as fn:
products = resource('sqlite:///%s::products' % fn,
dshape="""
var * {
product_no: int32,
product_sku: string,
name: ?string,
price: ?float64
}
""",
primary_key=['product_no', 'product_sku'])
assert len(products.primary_key) == 2
assert (products.primary_key.columns['product_no'] is
products.c.product_no)
assert (products.primary_key.columns['product_sku'] is
products.c.product_sku)
def test_compound_primary_key_with_fkey():
with tmpfile('db') as fn:
products = resource('sqlite:///%s::products' % fn,
dshape="""
var * {
product_no: int32,
product_sku: string,
name: ?string,
price: ?float64
}
""",
primary_key=['product_no', 'product_sku'])
ds = dshape("""var * {
order_id: int32,
product_no: map[int32, T],
product_sku: map[int32, U],
quantity: ?int32
}""")
orders = resource('sqlite:///%s::orders' % fn, dshape=ds,
primary_key=['order_id'],
foreign_keys={
'product_no': products.c.product_no,
'product_sku': products.c.product_sku
})
assert discover(orders) == dshape(
"""var * {
order_id: int32,
product_no: map[int32, {product_no: int32, product_sku: string, name: ?string, price: ?float64}],
product_sku: map[int32, {product_no: int32, product_sku: string, name: ?string, price: ?float64}],
quantity: ?int32
}
"""
)
def test_compound_primary_key_with_single_reference():
with tmpfile('db') as fn:
products = resource('sqlite:///%s::products' % fn,
dshape="""
var * {
product_no: int32,
product_sku: string,
name: ?string,
price: ?float64
}
""", primary_key=['product_no', 'product_sku'])
# TODO: should this fail everywhere? e.g., this fails in postgres, but
# not in sqlite because postgres doesn't allow partial foreign keys
# might be best to let the backend handle this
ds = dshape("""var * {
order_id: int32,
product_no: map[int32, T],
quantity: ?int32
}""")
orders = resource('sqlite:///%s::orders' % fn, dshape=ds,
foreign_keys=dict(product_no=products.c.product_no),
primary_key=['order_id'])
assert discover(orders) == dshape(
"""var * {
order_id: int32,
product_no: map[int32, {product_no: int32, product_sku: string, name: ?string, price: ?float64}],
quantity: ?int32
}
"""
)
def test_foreign_keys_as_compound_primary_key():
with tmpfile('db') as fn:
suppliers = resource(
'sqlite:///%s::suppliers' % fn,
dshape='var * {id: int64, name: string}',
primary_key=['id']
)
parts = resource(
'sqlite:///%s::parts' % fn,
dshape='var * {id: int64, name: string, region: string}',
primary_key=['id']
)
suppart = resource(
'sqlite:///%s::suppart' % fn,
dshape='var * {supp_id: map[int64, T], part_id: map[int64, U]}',
foreign_keys={
'supp_id': suppliers.c.id,
'part_id': parts.c.id
},
primary_key=['supp_id', 'part_id']
)
expected = dshape("""
var * {
supp_id: map[int64, {id: int64, name: string}],
part_id: map[int64, {id: int64, name: string, region: string}]
}
""")
result = discover(suppart)
assert result == expected
def test_append_chunks():
tbl = resource('sqlite:///:memory:::test', dshape='var * {a: int, b: int}')
res = odo(
chunks(np.ndarray)((
np.array([[0, 1], [2, 3]]),
np.array([[4, 5], [6, 7]]),
)),
tbl,
)
assert res is tbl
assert (
odo(tbl, np.ndarray) == np.array(
[(0, 1),
(2, 3),
(4, 5),
(6, 7)],
dtype=[('a', '<i4'), ('b', '<i4')],
)
).all()
def test_append_array_without_column_names():
with pytest.raises(TypeError):
odo(np.zeros((2, 2)), 'sqlite:///:memory:::test')
def test_numeric_create():
tbl = resource(
'sqlite:///:memory:::test',
dshape='var * {a: ?decimal[11, 2], b: decimal[10, 6]}'
)
assert tbl.c.a.nullable
assert not tbl.c.b.nullable
assert isinstance(tbl.c.a.type, sa.NUMERIC)
assert isinstance(tbl.c.b.type, sa.NUMERIC)
def test_numeric_append():
tbl = resource(
'sqlite:///:memory:::test',
dshape='var * {a: decimal[11, 2], b: ?decimal[10, 6]}'
)
data = [(1.0, 2.0), (2.0, 3.0)]
tbl = odo(data, tbl)
assert odo(tbl, list) == list(map(
lambda row: tuple(map(Decimal, row)),
tbl.select().execute().fetchall()
))
def test_discover_float_and_real_core_types():
assert discover(sa.FLOAT()) == float64
assert discover(sa.REAL()) == float32
def test_string_dshape_doc_example():
x = np.zeros((10, 2))
with tmpfile('.db') as fn:
t = odo(
x, 'sqlite:///%s::x' % fn, dshape='var * {a: float64, b: float64}'
)
assert all(row == (0, 0) for row in t.select().execute().fetchall())
def test_decimal_conversion():
data = [(1.0,), (2.0,)]
with tmpfile('.db') as fn:
t = odo(data, 'sqlite:///%s::x' % fn, dshape='var * {x: decimal[11, 2]}')
result = odo(sa.select([sa.func.sum(t.c.x)]), Decimal)
assert result == sum(Decimal(r[0]) for r in data)
| {
"repo_name": "cpcloud/odo",
"path": "odo/backends/tests/test_sql.py",
"copies": "1",
"size": "25128",
"license": "bsd-3-clause",
"hash": -5406466391290609000,
"line_mean": 33.1413043478,
"line_max": 114,
"alpha_frac": 0.4980101878,
"autogenerated": false,
"ratio": 3.727636849132176,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9722113718357835,
"avg_score": 0.0007066637148680691,
"num_lines": 736
} |
from __future__ import absolute_import, division, print_function
import pytest
sqlalchemy = pytest.importorskip('sqlalchemy')
sa = sqlalchemy
import re
import datashape
from blaze.compute.sql import (compute, computefull, select, lower_column,
compute_up)
from blaze.expr import *
from blaze.compatibility import xfail
from toolz import unique
from pandas import DataFrame
from odo import into, resource
from blaze.utils import tmpfile
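# Module-level fixtures: t/s and tbig/sbig pair a Blaze symbol with the
# SQLAlchemy table it is compiled against; most tests below compare the SQL
# text generated for the two.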
t = symbol('t', 'var * {name: string, amount: int, id: int}')
metadata = sa.MetaData()
s = sa.Table('accounts', metadata,
sa.Column('name', sa.String),
sa.Column('amount', sa.Integer),
sa.Column('id', sa.Integer, primary_key=True),
)
tbig = symbol('tbig', 'var * {name: string, sex: string[1], amount: int, id: int}')
sbig = sa.Table('accountsbig', metadata,
sa.Column('name', sa.String),
sa.Column('sex', sa.String),
sa.Column('amount', sa.Integer),
sa.Column('id', sa.Integer, primary_key=True),
)
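# normalize makes those SQL comparisons whitespace-, case-, underscore- and
# alias-number-insensitive. Illustrative example:
# normalize('SELECT  Foo_Bar AS alias_1') -> 'select foobar as alias'.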
def normalize(s):
s2 = ' '.join(s.strip().split()).lower().replace('_', '')
    s3 = re.sub(r'alias\d*', 'alias', s2)
return s3
def test_table():
result = str(computefull(t, s))
expected = """
SELECT accounts.name, accounts.amount, accounts.id
FROM accounts
""".strip()
assert normalize(result) == normalize(expected)
def test_projection():
print(compute(t[['name', 'amount']], s))
assert str(compute(t[['name', 'amount']], s)) == \
str(sa.select([s.c.name, s.c.amount]))
def test_eq():
assert str(compute(t['amount'] == 100, s, post_compute=False)) == \
str(s.c.amount == 100)
def test_eq_unicode():
assert str(compute(t['name'] == u'Alice', s, post_compute=False)) == \
str(s.c.name == u'Alice')
def test_selection():
assert str(compute(t[t['amount'] == 0], s)) == \
str(sa.select([s]).where(s.c.amount == 0))
assert str(compute(t[t['amount'] > 150], s)) == \
str(sa.select([s]).where(s.c.amount > 150))
def test_arithmetic():
assert str(compute(t['amount'] + t['id'], s)) == \
str(sa.select([s.c.amount + s.c.id]))
assert str(compute(t['amount'] + t['id'], s, post_compute=False)) == \
str(s.c.amount + s.c.id)
assert str(compute(t['amount'] * t['id'], s, post_compute=False)) == \
str(s.c.amount * s.c.id)
assert str(compute(t['amount'] * 2, s, post_compute=False)) == \
str(s.c.amount * 2)
assert str(compute(2 * t['amount'], s, post_compute=False)) == \
str(2 * s.c.amount)
assert (str(compute(~(t['amount'] > 10), s, post_compute=False)) ==
"~(accounts.amount > :amount_1)")
assert str(compute(t['amount'] + t['id'] * 2, s)) == \
str(sa.select([s.c.amount + s.c.id * 2]))
def test_join():
metadata = sa.MetaData()
lhs = sa.Table('amounts', metadata,
sa.Column('name', sa.String),
sa.Column('amount', sa.Integer))
rhs = sa.Table('ids', metadata,
sa.Column('name', sa.String),
sa.Column('id', sa.Integer))
expected = lhs.join(rhs, lhs.c.name == rhs.c.name)
expected = select(list(unique(expected.columns, key=lambda c:
c.name))).select_from(expected)
L = symbol('L', 'var * {name: string, amount: int}')
R = symbol('R', 'var * {name: string, id: int}')
joined = join(L, R, 'name')
result = compute(joined, {L: lhs, R: rhs})
assert normalize(str(result)) == normalize("""
SELECT amounts.name, amounts.amount, ids.id
FROM amounts JOIN ids ON amounts.name = ids.name""")
assert str(select(result)) == str(select(expected))
# Schemas match
assert list(result.c.keys()) == list(joined.fields)
# test sort on join
result = compute(joined.sort('amount'), {L: lhs, R: rhs})
assert normalize(str(result)) == normalize("""
SELECT amounts.name, amounts.amount, ids.id
FROM amounts JOIN ids ON amounts.name = ids.name
ORDER BY amounts.amount""")
def test_clean_complex_join():
metadata = sa.MetaData()
lhs = sa.Table('amounts', metadata,
sa.Column('name', sa.String),
sa.Column('amount', sa.Integer))
rhs = sa.Table('ids', metadata,
sa.Column('name', sa.String),
sa.Column('id', sa.Integer))
L = symbol('L', 'var * {name: string, amount: int}')
R = symbol('R', 'var * {name: string, id: int}')
joined = join(L[L.amount > 0], R, 'name')
result = compute(joined, {L: lhs, R: rhs})
expected1 = """
SELECT amounts.name, amounts.amount, ids.id
FROM amounts JOIN ids ON amounts.name = ids.name
WHERE amounts.amount > :amount_1"""
expected2 = """
SELECT alias.name, alias.amount, ids.id
FROM (SELECT amounts.name AS name, amounts.amount AS amount
FROM amounts
WHERE amounts.amount > :amount_1) AS alias
JOIN ids ON alias.name = ids.name"""
assert (normalize(str(result)) == normalize(expected1) or
normalize(str(result)) == normalize(expected2))
def test_multi_column_join():
metadata = sa.MetaData()
lhs = sa.Table('aaa', metadata,
sa.Column('x', sa.Integer),
sa.Column('y', sa.Integer),
sa.Column('z', sa.Integer))
rhs = sa.Table('bbb', metadata,
sa.Column('w', sa.Integer),
sa.Column('x', sa.Integer),
sa.Column('y', sa.Integer))
L = symbol('L', 'var * {x: int, y: int, z: int}')
R = symbol('R', 'var * {w: int, x: int, y: int}')
joined = join(L, R, ['x', 'y'])
expected = lhs.join(rhs, (lhs.c.x == rhs.c.x)
& (lhs.c.y == rhs.c.y))
expected = select(list(unique(expected.columns, key=lambda c:
c.name))).select_from(expected)
result = compute(joined, {L: lhs, R: rhs})
assert str(result) == str(expected)
assert str(select(result)) == str(select(expected))
# Schemas match
print(result.c.keys())
print(joined.fields)
assert list(result.c.keys()) == list(joined.fields)
def test_unary_op():
assert str(compute(exp(t['amount']), s, post_compute=False)) == \
str(sa.func.exp(s.c.amount))
def test_unary_op_negate():
assert str(compute(-t['amount'], s, post_compute=False)) == \
str(-s.c.amount)
def test_reductions():
assert str(compute(sum(t['amount']), s, post_compute=False)) == \
str(sa.sql.functions.sum(s.c.amount))
assert str(compute(mean(t['amount']), s, post_compute=False)) == \
str(sa.sql.func.avg(s.c.amount))
assert str(compute(count(t['amount']), s, post_compute=False)) == \
str(sa.sql.func.count(s.c.amount))
assert 'amount_sum' == compute(sum(t['amount']), s, post_compute=False).name
def test_reduction_with_invalid_axis_argument():
with pytest.raises(ValueError):
compute(t.amount.mean(axis=1))
with pytest.raises(ValueError):
compute(t.count(axis=1))
with pytest.raises(ValueError):
compute(t[['amount', 'id']].count(axis=1))
def test_nelements():
rhs = str(compute(t.count(), s))
assert str(compute(t.nelements(), s)) == rhs
assert str(compute(t.nelements(axis=None), s)) == rhs
assert str(compute(t.nelements(axis=0), s)) == rhs
assert str(compute(t.nelements(axis=(0,)), s)) == rhs
@pytest.mark.xfail(raises=Exception, reason="We don't support axis=1 for"
" Record datashapes")
def test_nelements_axis_1():
assert compute(nelements(t, axis=1), s) == len(s.columns)
def test_count_on_table():
result = compute(t.count(), s)
assert normalize(str(result)) == normalize("""
SELECT count(accounts.id) as count_1
FROM accounts""")
result = compute(t[t.amount > 0].count(), s)
assert (
normalize(str(result)) == normalize("""
SELECT count(accounts.id) as count_1
FROM accounts
WHERE accounts.amount > :amount_1""")
or
normalize(str(result)) == normalize("""
SELECT count(alias.id) as count
FROM (SELECT accounts.name AS name, accounts.amount AS amount, accounts.id AS id
FROM accounts
WHERE accounts.amount > :amount_1) as alias"""))
def test_distinct():
result = str(compute(Distinct(t['amount']), s, post_compute=False))
assert 'distinct' in result.lower()
assert 'amount' in result.lower()
print(result)
assert result == str(sa.distinct(s.c.amount))
def test_distinct_multiple_columns():
assert normalize(str(compute(t.distinct(), s))) == normalize("""
SELECT DISTINCT accounts.name, accounts.amount, accounts.id
FROM accounts""")
def test_nunique():
result = str(computefull(nunique(t['amount']), s))
print(result)
assert 'distinct' in result.lower()
assert 'count' in result.lower()
assert 'amount' in result.lower()
@xfail(reason="Fails because SQLAlchemy doesn't seem to know binary reductions")
def test_binary_reductions():
assert str(compute(any(t['amount'] > 150), s)) == \
str(sqlalchemy.sql.functions.any(s.c.amount > 150))
def test_by():
expr = by(t['name'], total=t['amount'].sum())
result = compute(expr, s)
expected = sa.select([s.c.name,
sa.sql.functions.sum(s.c.amount).label('total')]
).group_by(s.c.name)
assert str(result) == str(expected)
def test_by_head():
t2 = t.head(100)
expr = by(t2['name'], total=t2['amount'].sum())
result = compute(expr, s)
# s2 = select(s).limit(100)
# expected = sa.select([s2.c.name,
# sa.sql.functions.sum(s2.c.amount).label('amount_sum')]
# ).group_by(s2.c.name)
expected = """
SELECT alias.name, sum(alias.amount) as total
FROM (SELECT accounts.name AS name, accounts.amount AS amount, accounts.id AS ID
FROM accounts
LIMIT :param_1) as alias
GROUP BY alias.name"""
expected = """
SELECT accounts.name, sum(accounts.amount) as total
FROM accounts
GROUP by accounts.name
LIMIT :param_1"""
assert normalize(str(result)) == normalize(str(expected))
def test_by_two():
expr = by(tbig[['name', 'sex']], total=tbig['amount'].sum())
result = compute(expr, sbig)
expected = (sa.select([sbig.c.name,
sbig.c.sex,
sa.sql.functions.sum(sbig.c.amount).label('total')])
.group_by(sbig.c.name, sbig.c.sex))
assert str(result) == str(expected)
def test_by_three():
result = compute(by(tbig[['name', 'sex']],
total=(tbig['id'] + tbig['amount']).sum()),
sbig)
assert normalize(str(result)) == normalize("""
SELECT accountsbig.name,
accountsbig.sex,
sum(accountsbig.id + accountsbig.amount) AS total
FROM accountsbig GROUP BY accountsbig.name, accountsbig.sex
""")
def test_by_summary_clean():
expr = by(t.name, min=t.amount.min(), max=t.amount.max())
result = compute(expr, s)
expected = """
SELECT accounts.name, max(accounts.amount) AS max, min(accounts.amount) AS min
FROM accounts
GROUP BY accounts.name
"""
assert normalize(str(result)) == normalize(expected)
def test_by_summary_single_column():
expr = by(t.name, n=t.name.count(), biggest=t.name.max())
result = compute(expr, s)
expected = """
SELECT accounts.name, max(accounts.name) AS biggest, count(accounts.name) AS n
FROM accounts
GROUP BY accounts.name
"""
assert normalize(str(result)) == normalize(expected)
def test_join_projection():
metadata = sa.MetaData()
lhs = sa.Table('amounts', metadata,
sa.Column('name', sa.String),
sa.Column('amount', sa.Integer))
rhs = sa.Table('ids', metadata,
sa.Column('name', sa.String),
sa.Column('id', sa.Integer))
L = symbol('L', 'var * {name: string, amount: int}')
R = symbol('R', 'var * {name: string, id: int}')
want = join(L, R, 'name')[['amount', 'id']]
result = compute(want, {L: lhs, R: rhs})
print(result)
assert 'join' in str(result).lower()
assert result.c.keys() == ['amount', 'id']
assert 'amounts.name = ids.name' in str(result)
def test_sort():
assert str(compute(t.sort('amount'), s)) == \
str(select(s).order_by(s.c.amount))
assert str(compute(t.sort('amount', ascending=False), s)) == \
str(select(s).order_by(sqlalchemy.desc(s.c.amount)))
def test_sort_on_distinct():
assert normalize(str(compute(t.amount.sort(), s))) == normalize("""
SELECT accounts.amount
FROM accounts
ORDER BY accounts.amount""")
assert normalize(str(compute(t.amount.distinct().sort(), s))) == normalize("""
SELECT DISTINCT accounts.amount as amount
FROM accounts
ORDER BY amount""")
def test_head():
assert str(compute(t.head(2), s)) == str(select(s).limit(2))
def test_label():
assert str(compute((t['amount'] * 10).label('foo'), s, post_compute=False))\
== str((s.c.amount * 10).label('foo'))
def test_relabel():
result = compute(t.relabel({'name': 'NAME', 'id': 'ID'}), s)
expected = select([s.c.name.label('NAME'), s.c.amount, s.c.id.label('ID')])
assert str(result) == str(expected)
def test_merge():
col = (t['amount'] * 2).label('new')
expr = merge(t['name'], col)
result = str(compute(expr, s))
assert 'amount * ' in result
assert 'FROM accounts' in result
assert 'SELECT accounts.name' in result
assert 'new' in result
def test_projection_of_selection():
print(compute(t[t['amount'] < 0][['name', 'amount']], s))
assert len(str(compute(t[t['amount'] < 0], s))) > \
len(str(compute(t[t['amount'] < 0][['name', 'amount']], s)))
def test_outer_join():
L = symbol('L', 'var * {id: int, name: string, amount: real}')
R = symbol('R', 'var * {city: string, id: int}')
with tmpfile('db') as fn:
uri = 'sqlite:///' + fn
engine = resource(uri)
_left = [(1, 'Alice', 100),
(2, 'Bob', 200),
(4, 'Dennis', 400)]
left = resource(uri, 'left', dshape=L.dshape)
into(left, _left)
_right = [('NYC', 1),
('Boston', 1),
('LA', 3),
('Moscow', 4)]
right = resource(uri, 'right', dshape=R.dshape)
into(right, _right)
conn = engine.connect()
query = compute(join(L, R, how='inner'),
{L: left, R: right},
post_compute=False)
result = list(map(tuple, conn.execute(query).fetchall()))
assert set(result) == set(
[(1, 'Alice', 100, 'NYC'),
(1, 'Alice', 100, 'Boston'),
(4, 'Dennis', 400, 'Moscow')])
query = compute(join(L, R, how='left'),
{L: left, R: right},
post_compute=False)
result = list(map(tuple, conn.execute(query).fetchall()))
assert set(result) == set(
[(1, 'Alice', 100, 'NYC'),
(1, 'Alice', 100, 'Boston'),
(2, 'Bob', 200, None),
(4, 'Dennis', 400, 'Moscow')])
query = compute(join(L, R, how='right'),
{L: left, R: right},
post_compute=False)
print(query)
result = list(map(tuple, conn.execute(query).fetchall()))
print(result)
assert set(result) == set(
[(1, 'Alice', 100, 'NYC'),
(1, 'Alice', 100, 'Boston'),
(3, None, None, 'LA'),
(4, 'Dennis', 400, 'Moscow')])
# SQLAlchemy doesn't support full outer join
"""
query = compute(join(L, R, how='outer'),
{L: left, R: right},
post_compute=False)
result = list(map(tuple, conn.execute(query).fetchall()))
assert set(result) == set(
[(1, 'Alice', 100, 'NYC'),
(1, 'Alice', 100, 'Boston'),
(2, 'Bob', 200, None),
(3, None, None, 'LA'),
(4, 'Dennis', 400, 'Moscow')])
"""
conn.close()
def test_summary():
expr = summary(a=t.amount.sum(), b=t.id.count())
result = str(compute(expr, s))
assert 'sum(accounts.amount) as a' in result.lower()
assert 'count(accounts.id) as b' in result.lower()
def test_summary_clean():
t2 = t[t.amount > 0]
expr = summary(a=t2.amount.sum(), b=t2.id.count())
result = str(compute(expr, s))
assert normalize(result) == normalize("""
SELECT sum(accounts.amount) as a, count(accounts.id) as b
FROM accounts
WHERE accounts.amount > :amount_1""")
def test_summary_by():
expr = by(t.name, summary(a=t.amount.sum(), b=t.id.count()))
result = str(compute(expr, s))
assert 'sum(accounts.amount) as a' in result.lower()
assert 'count(accounts.id) as b' in result.lower()
assert 'group by accounts.name' in result.lower()
def test_clean_join():
metadata = sa.MetaData()
name = sa.Table('name', metadata,
sa.Column('id', sa.Integer),
sa.Column('name', sa.String),
)
city = sa.Table('place', metadata,
sa.Column('id', sa.Integer),
sa.Column('city', sa.String),
sa.Column('country', sa.String),
)
friends = sa.Table('friends', metadata,
sa.Column('a', sa.Integer),
sa.Column('b', sa.Integer),
)
tcity = symbol('city', discover(city))
tfriends = symbol('friends', discover(friends))
tname = symbol('name', discover(name))
ns = {tname: name, tfriends: friends, tcity: city}
expr = join(tfriends, tname, 'a', 'id')
assert normalize(str(compute(expr, ns))) == normalize("""
SELECT friends.a, friends.b, name.name
FROM friends JOIN name on friends.a = name.id""")
expr = join(join(tfriends, tname, 'a', 'id'), tcity, 'a', 'id')
result = compute(expr, ns)
expected1 = """
SELECT friends.a, friends.b, name.name, place.city, place.country
FROM friends
JOIN name ON friends.a = name.id
JOIN place ON friends.a = place.id
"""
expected2 = """
SELECT alias.a, alias.b, alias.name, place.city, place.country
FROM (SELECT friends.a AS a, friends.b AS b, name.name AS name
FROM friends JOIN name ON friends.a = name.id) AS alias
JOIN place ON alias.a = place.id
"""
assert (normalize(str(result)) == normalize(expected1) or
normalize(str(result)) == normalize(expected2))
def test_like():
expr = t.like(name='Alice*')
assert normalize(str(compute(expr, s))) == normalize("""
SELECT accounts.name, accounts.amount, accounts.id
FROM accounts
WHERE accounts.name LIKE :name_1""")
def test_strlen():
expr = t.name.strlen()
result = str(compute(expr, s))
expected = "SELECT length(accounts.name) as length1 FROM accounts"
assert normalize(result) == normalize(expected)
def test_columnwise_on_complex_selection():
assert normalize(str(select(compute(t[t.amount > 0].amount + 1, s)))) == \
normalize("""
SELECT accounts.amount + :amount_1 AS anon_1
FROM accounts
WHERE accounts.amount > :amount_2
""")
def test_reductions_on_complex_selections():
assert normalize(str(select(compute(t[t.amount > 0].id.sum(), s)))) == \
normalize("""
SELECT sum(accounts.id) as id_sum
FROM accounts
WHERE accounts.amount > :amount_1 """)
def test_clean_summary_by_where():
    t2 = t[t.id == 1]
expr = by(t2.name, sum=t2.amount.sum(), count=t2.amount.count())
result = compute(expr, s)
assert normalize(str(result)) == normalize("""
SELECT accounts.name, count(accounts.amount) AS count, sum(accounts.amount) AS sum
FROM accounts
WHERE accounts.id = :id_1
GROUP BY accounts.name
""")
def test_by_on_count():
expr = by(t.name, count=t.count())
result = compute(expr, s)
assert normalize(str(result)) == normalize("""
SELECT accounts.name, count(accounts.id) AS count
FROM accounts
GROUP BY accounts.name
""")
def test_join_complex_clean():
metadata = sa.MetaData()
name = sa.Table('name', metadata,
sa.Column('id', sa.Integer),
sa.Column('name', sa.String),
)
city = sa.Table('place', metadata,
sa.Column('id', sa.Integer),
sa.Column('city', sa.String),
sa.Column('country', sa.String),
)
sel = select(name).where(name.c.id > 10)
tname = symbol('name', discover(name))
tcity = symbol('city', discover(city))
ns = {tname: name, tcity: city}
expr = join(tname[tname.id > 0], tcity, 'id')
result = compute(expr, ns)
expected1 = """
SELECT name.id, name.name, place.city, place.country
FROM name JOIN place ON name.id = place.id
WHERE name.id > :id_1"""
expected2 = """
SELECT alias.id, alias.name, place.city, place.country
FROM (SELECT name.id as id, name.name AS name
FROM name
WHERE name.id > :id_1) AS alias
JOIN place ON alias.id = place.id"""
assert (normalize(str(result)) == normalize(expected1) or
normalize(str(result)) == normalize(expected2))
def test_projection_of_join():
metadata = sa.MetaData()
name = sa.Table('name', metadata,
sa.Column('id', sa.Integer),
sa.Column('name', sa.String),
)
city = sa.Table('place', metadata,
sa.Column('id', sa.Integer),
sa.Column('city', sa.String),
sa.Column('country', sa.String),
)
tname = symbol('name', discover(name))
tcity = symbol('city', discover(city))
expr = join(tname, tcity[tcity.city == 'NYC'], 'id')[['country', 'name']]
ns = {tname: name, tcity: city}
result = compute(expr, ns)
expected1 = """
SELECT place.country, name.name
FROM name JOIN place ON name.id = place.id
WHERE place.city = :city_1"""
expected2 = """
SELECT alias.country, name.name
FROM name
JOIN (SELECT place.id AS id, place.city AS city, place.country AS country
FROM place
WHERE place.city = :city_1) AS alias
ON name.id = alias_6.id"""
assert (normalize(str(result)) == normalize(expected1) or
normalize(str(result)) == normalize(expected2))
def test_lower_column():
metadata = sa.MetaData()
name = sa.Table('name', metadata,
sa.Column('id', sa.Integer),
sa.Column('name', sa.String),
)
city = sa.Table('place', metadata,
sa.Column('id', sa.Integer),
sa.Column('city', sa.String),
sa.Column('country', sa.String),
)
tname = symbol('name', discover(name))
tcity = symbol('city', discover(city))
ns = {tname: name, tcity: city}
assert lower_column(name.c.id) is name.c.id
assert lower_column(select(name).c.id) is name.c.id
j = name.join(city, name.c.id == city.c.id)
col = [c for c in j.columns if c.name == 'country'][0]
assert lower_column(col) is city.c.country
def test_selection_of_join():
metadata = sa.MetaData()
name = sa.Table('name', metadata,
sa.Column('id', sa.Integer),
sa.Column('name', sa.String),
)
city = sa.Table('place', metadata,
sa.Column('id', sa.Integer),
sa.Column('city', sa.String),
sa.Column('country', sa.String),
)
tname = symbol('name', discover(name))
tcity = symbol('city', discover(city))
ns = {tname: name, tcity: city}
j = join(tname, tcity, 'id')
expr = j[j.city == 'NYC'].name
result = compute(expr, ns)
assert normalize(str(result)) == normalize("""
SELECT name.name
FROM name JOIN place ON name.id = place.id
WHERE place.city = :city_1""")
def test_join_on_same_table():
metadata = sa.MetaData()
T = sa.Table('tab', metadata,
sa.Column('a', sa.Integer),
sa.Column('b', sa.Integer),
)
t = symbol('tab', discover(T))
expr = join(t, t, 'a')
result = compute(expr, {t: T})
assert normalize(str(result)) == normalize("""
SELECT tab_left.a, tab_left.b, tab_right.b
FROM tab AS tab_left JOIN tab AS tab_right
ON tab_left.a = tab_right.a
""")
expr = join(t, t, 'a').b_left.sum()
result = compute(expr, {t: T})
assert normalize(str(result)) == normalize("""
SELECT sum(tab_left.b) as b_left_sum
FROM tab AS tab_left JOIN tab AS tab_right
ON tab_left.a = tab_right.a
""")
expr = join(t, t, 'a')
expr = summary(total=expr.a.sum(), smallest=expr.b_right.min())
result = compute(expr, {t: T})
assert normalize(str(result)) == normalize("""
SELECT min(tab_right.b) as smallest, sum(tab_left.a) as total
FROM tab AS tab_left JOIN tab AS tab_right
ON tab_left.a = tab_right.a
""")
def test_field_access_on_engines():
engine = sa.create_engine('sqlite:///:memory:')
metadata = sa.MetaData(engine)
name = sa.Table('name', metadata,
sa.Column('id', sa.Integer),
sa.Column('name', sa.String),
)
name.create()
city = sa.Table('city', metadata,
sa.Column('id', sa.Integer),
sa.Column('city', sa.String),
sa.Column('country', sa.String),
)
city.create()
s = symbol('s', discover(engine))
result = compute_up(s.city, engine)
assert isinstance(result, sa.Table)
assert result.name == 'city'
def test_computation_directly_on_sqlalchemy_Tables():
engine = sa.create_engine('sqlite:///:memory:')
metadata = sa.MetaData(engine)
name = sa.Table('name', metadata,
sa.Column('id', sa.Integer),
sa.Column('name', sa.String),
)
name.create()
s = symbol('s', discover(name))
result = into(list, compute(s.id + 1, name))
assert not isinstance(result, sa.sql.Selectable)
assert list(result) == []
def test_computation_directly_on_metadata():
engine = sa.create_engine('sqlite:///:memory:')
metadata = sa.MetaData(engine)
name = sa.Table('name', metadata,
sa.Column('id', sa.Integer),
sa.Column('name', sa.String),
)
name.create()
s = symbol('s', discover(metadata))
result = compute(s.name, {s: metadata}, post_compute=False)
assert result == name
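# Module-level tables and symbols shared by the aliased-view tests below.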
sql_bank = sa.Table('bank', sa.MetaData(),
sa.Column('id', sa.Integer),
sa.Column('name', sa.String),
sa.Column('amount', sa.Integer))
sql_cities = sa.Table('cities', sa.MetaData(),
sa.Column('name', sa.String),
sa.Column('city', sa.String))
bank = Symbol('bank', discover(sql_bank))
cities = Symbol('cities', discover(sql_cities))
def test_aliased_views_with_two_group_bys():
expr = by(bank.name, total=bank.amount.sum())
expr2 = by(expr.total, count=expr.name.count())
result = compute(expr2, {bank: sql_bank, cities: sql_cities})
assert normalize(str(result)) == normalize("""
SELECT alias.total, count(alias.name) as count
FROM (SELECT bank.name AS name, sum(bank.amount) AS total
FROM bank
GROUP BY bank.name) as alias
GROUP BY alias.total
""")
def test_aliased_views_with_join():
joined = join(bank, cities)
expr = by(joined.city, total=joined.amount.sum())
expr2 = by(expr.total, count=expr.city.nunique())
result = compute(expr2, {bank: sql_bank, cities: sql_cities})
assert normalize(str(result)) == normalize("""
SELECT alias.total, count(DISTINCT alias.city) AS count
FROM (SELECT cities.city AS city, sum(bank.amount) AS total
FROM bank
JOIN cities ON bank.name = cities.name
GROUP BY cities.city) as alias
GROUP BY alias.total
""")
def test_select_field_on_alias():
result = compute_up(t.amount, select(s).limit(10).alias('foo'))
assert normalize(str(select(result))) == normalize("""
SELECT foo.amount
FROM (SELECT accounts.name AS name, accounts.amount AS amount, accounts.id AS id
FROM accounts
LIMIT :param_1) as foo""")
@pytest.mark.xfail(raises=Exception,
reason="sqlalchemy.join seems to drop unnecessary tables")
def test_join_on_single_column():
expr = join(cities[['name']], bank)
result = compute(expr, {bank: sql_bank, cities: sql_cities})
assert normalize(str(result)) == """
SELECT bank.id, bank.name, bank.amount
FROM bank join cities ON bank.name = cities.name"""
expr = join(bank, cities.name)
result = compute(expr, {bank: sql_bank, cities: sql_cities})
assert normalize(str(result)) == """
SELECT bank.id, bank.name, bank.amount
FROM bank join cities ON bank.name = cities.name"""
def test_aliased_views_more():
metadata = sa.MetaData()
lhs = sa.Table('aaa', metadata,
sa.Column('x', sa.Integer),
sa.Column('y', sa.Integer),
sa.Column('z', sa.Integer))
rhs = sa.Table('bbb', metadata,
sa.Column('w', sa.Integer),
sa.Column('x', sa.Integer),
sa.Column('y', sa.Integer))
L = symbol('L', 'var * {x: int, y: int, z: int}')
R = symbol('R', 'var * {w: int, x: int, y: int}')
expr = join(by(L.x, y_total=L.y.sum()),
R)
result = compute(expr, {L: lhs, R: rhs})
assert normalize(str(result)) == normalize("""
SELECT alias.x, alias.y_total, bbb.w, bbb.y
FROM (SELECT aaa.x as x, sum(aaa.y) as y_total
FROM aaa
GROUP BY aaa.x) AS alias
JOIN bbb ON alias.x = bbb.x """)
expr2 = by(expr.w, count=expr.x.count(), total2=expr.y_total.sum())
result2 = compute(expr2, {L: lhs, R: rhs})
assert (
normalize(str(result2)) == normalize("""
SELECT alias_2.w, count(alias_2.x) as count, sum(alias_2.y_total) as total2
FROM (SELECT alias.x, alias.y_total, bbb.w, bbb.y
FROM (SELECT aaa.x as x, sum(aaa.y) as y_total
FROM aaa
GROUP BY aaa.x) AS alias
JOIN bbb ON alias.x = bbb.x) AS alias_2
GROUP BY alias_2.w""")
or
normalize(str(result2)) == normalize("""
SELECT bbb.w, count(alias.x) as count, sum(alias.y_total) as total2
FROM (SELECT aaa.x as x, sum(aaa.y) as y_total
FROM aaa
GROUP BY aaa.x) as alias
JOIN bbb ON alias.x = bbb.x
GROUP BY bbb.w"""))
def test_aliased_views_with_computation():
engine = sa.create_engine('sqlite:///:memory:')
df_aaa = DataFrame({'x': [1, 2, 3, 2, 3],
'y': [2, 1, 2, 3, 1],
'z': [3, 3, 3, 1, 2]})
df_bbb = DataFrame({'w': [1, 2, 3, 2, 3],
'x': [2, 1, 2, 3, 1],
'y': [3, 3, 3, 1, 2]})
df_aaa.to_sql('aaa', engine)
df_bbb.to_sql('bbb', engine)
metadata = sa.MetaData(engine)
metadata.reflect()
sql_aaa = metadata.tables['aaa']
sql_bbb = metadata.tables['bbb']
L = Symbol('aaa', discover(df_aaa))
R = Symbol('bbb', discover(df_bbb))
expr = join(by(L.x, y_total=L.y.sum()),
R)
a = compute(expr, {L: df_aaa, R: df_bbb})
b = compute(expr, {L: sql_aaa, R: sql_bbb})
assert into(set, a) == into(set, b)
expr2 = by(expr.w, count=expr.x.count(), total2=expr.y_total.sum())
a = compute(expr2, {L: df_aaa, R: df_bbb})
b = compute(expr2, {L: sql_aaa, R: sql_bbb})
assert into(set, a) == into(set, b)
expr3 = by(expr.x, count=expr.y_total.count())
a = compute(expr3, {L: df_aaa, R: df_bbb})
b = compute(expr3, {L: sql_aaa, R: sql_bbb})
assert into(set, a) == into(set, b)
expr4 = join(expr2, R)
a = compute(expr4, {L: df_aaa, R: df_bbb})
b = compute(expr4, {L: sql_aaa, R: sql_bbb})
assert into(set, a) == into(set, b)
""" # Takes a while
expr5 = by(expr4.count, total=(expr4.x + expr4.y).sum())
a = compute(expr5, {L: df_aaa, R: df_bbb})
b = compute(expr5, {L: sql_aaa, R: sql_bbb})
assert into(set, a) == into(set, b)
"""
def test_distinct_count_on_projection():
expr = t[['amount']].distinct().count()
result = compute(expr, {t: s})
assert (
normalize(str(result)) == normalize("""
SELECT count(DISTINCT accounts.amount)
FROM accounts""")
or
normalize(str(result)) == normalize("""
SELECT count(alias.amount) as count
FROM (SELECT DISTINCT accounts.amount AS amount
FROM accounts) as alias"""))
# note that id is the primary key
expr = t[['amount', 'id']].distinct().count()
result = compute(expr, {t: s})
assert normalize(str(result)) == normalize("""
SELECT count(alias.id) as count
FROM (SELECT DISTINCT accounts.amount AS amount, accounts.id AS id
FROM accounts) as alias""")
def test_join_count():
ds = datashape.dshape('{t1: var * {x: int, y: int}, t2: var * {a: int, b: int}}')
engine = resource('sqlite:///:memory:', dshape=ds)
db = symbol('db', ds)
expr = join(db.t1[db.t1.x > -1], db.t2, 'x', 'a').count()
result = compute(expr, {db: engine}, post_compute=False)
expected1 = """
SELECT count(alias.x) as count
FROM (SELECT t1.x AS x, t1.y AS y, t2.b AS b
FROM t1 JOIN t2 ON t1.x = t2.a
WHERE t1.x > ?) as alias
"""
expected2 = """
SELECT count(alias2.x) AS __count
FROM (SELECT alias1.x AS x, alias1.y AS y, t2.b AS b
FROM (SELECT t1.x AS x, t1.y AS y
FROM t1
WHERE t1.x > ?) AS alias1
JOIN t2 ON alias1.x = t2.a) AS alias2"""
assert (normalize(str(result)) == normalize(expected1) or
normalize(str(result)) == normalize(expected2))
def test_merge_compute():
data = [(1, 'Alice', 100),
(2, 'Bob', 200),
(4, 'Dennis', 400)]
ds = datashape.dshape('var * {id: int, name: string, amount: real}')
s = symbol('s', ds)
with tmpfile('db') as fn:
uri = 'sqlite:///' + fn
into(uri + '::table', data, dshape=ds)
expr = transform(s, amount10=s.amount * 10)
result = into(list, compute(expr, {s: data}))
assert result == [(1, 'Alice', 100, 1000),
(2, 'Bob', 200, 2000),
(4, 'Dennis',400, 4000)]
def test_head_limit():
assert compute(t.head(5).head(10), s)._limit == 5
assert compute(t.head(10).head(5), s)._limit == 5
assert compute(t.head(10).head(10), s)._limit == 10
def test_no_extraneous_join():
ds = """ {event: var * {name: ?string,
operation: ?string,
datetime_nearest_receiver: ?datetime,
aircraft: ?string,
temperature_2m: ?float64,
temperature_5cm: ?float64,
humidity: ?float64,
windspeed: ?float64,
pressure: ?float64,
include: int64},
operation: var * {name: ?string,
runway: int64,
takeoff: bool,
datetime_nearest_close: ?string}}
"""
db = resource('sqlite:///:memory:', dshape=ds)
d = symbol('db', dshape=ds)
expr = join(d.event[d.event.include==True],
d.operation[['name', 'datetime_nearest_close']],
'operation', 'name')
result = compute(expr, db)
assert normalize(str(result)) == normalize("""
SELECT alias.operation, alias.name, alias.datetime_nearest_receiver,
alias.aircraft, alias.temperature_2m, alias.temperature_5cm,
alias.humidity, alias.windspeed, alias.pressure,
alias.include, alias.datetime_nearest_close
FROM (SELECT event.name AS name,
event.operation AS operation,
event.datetime_nearest_receiver AS datetime_nearest_receiver,
event.aircraft AS aircraft,
event.temperature_2m AS temperature_2m,
event.temperature_5cm AS temperature_5cm,
event.humidity AS humidity,
event.windspeed AS windspeed,
event.pressure AS pressure,
event.include AS include
FROM event WHERE event.include = 1) AS alias1
JOIN (SELECT operation.name AS name,
operation.datetime_nearest_close as datetime_nearest_close
FROM operation) AS alias2
ON alias1.operation = alias2.name
""")
def test_math():
result = compute(sin(t.amount), s)
assert normalize(str(result)) == normalize("""
SELECT sin(accounts.amount) as sin1
FROM accounts""")
result = compute(floor(t.amount), s)
assert normalize(str(result)) == normalize("""
SELECT floor(accounts.amount) as floor1
FROM accounts""")
result = compute(t.amount // 2, s)
assert normalize(str(result)) == normalize("""
SELECT floor(accounts.amount / :amount1) AS floor_1
FROM accounts""")
| {
"repo_name": "mrocklin/blaze",
"path": "blaze/compute/tests/test_sql_compute.py",
"copies": "1",
"size": "38303",
"license": "bsd-3-clause",
"hash": -275609333618784960,
"line_mean": 30.5770816158,
"line_max": 88,
"alpha_frac": 0.5558311359,
"autogenerated": false,
"ratio": 3.5066373706857092,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4562468506585709,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import random
from bisect import bisect_left
from itertools import cycle
from operator import itemgetter, add
from ..utils import funcname, import_required
from ..core import istask
from ..compatibility import apply
_BOKEH_MISSING_MSG = "Diagnostics plots require `bokeh` to be installed"
_TOOLZ_MISSING_MSG = "Diagnostics plots require `toolz` to be installed"
def unquote(expr):
if istask(expr):
if expr[0] in (tuple, list, set):
return expr[0](map(unquote, expr[1]))
elif (expr[0] == dict and
isinstance(expr[1], list) and
isinstance(expr[1][0], list)):
return dict(map(unquote, expr[1]))
return expr
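# Illustrative note (added for clarity; not part of the original module): `unquote`
# rebuilds literal containers that dask quotes as tasks. A sketch of the expected
# behaviour under these assumptions:
#   unquote((tuple, [1, 2]))              -> (1, 2)
#   unquote((dict, [['a', 1], ['b', 2]])) -> {'a': 1, 'b': 2}
# Anything that is not such a task is returned unchanged.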
def pprint_task(task, keys, label_size=60):
"""Return a nicely formatted string for a task.
Parameters
----------
task:
Value within dask graph to render as text
keys: iterable
List of keys within dask graph
label_size: int (optional)
Maximum size of output label, defaults to 60
Examples
--------
>>> from operator import add, mul
>>> dsk = {'a': 1,
... 'b': 2,
... 'c': (add, 'a', 'b'),
... 'd': (add, (mul, 'a', 'b'), 'c'),
... 'e': (sum, ['a', 'b', 5]),
... 'f': (add,),
... 'g': []}
>>> pprint_task(dsk['c'], dsk)
'add(_, _)'
>>> pprint_task(dsk['d'], dsk)
'add(mul(_, _), _)'
>>> pprint_task(dsk['e'], dsk)
'sum([_, _, *])'
>>> pprint_task(dsk['f'], dsk)
'add()'
>>> pprint_task(dsk['g'], dsk)
'[]'
"""
if istask(task):
func = task[0]
if func is apply:
head = funcname(task[1])
tail = ')'
args = unquote(task[2]) if len(task) > 2 else ()
kwargs = unquote(task[3]) if len(task) > 3 else {}
else:
if hasattr(func, 'funcs'):
head = '('.join(funcname(f) for f in func.funcs)
tail = ')' * len(func.funcs)
else:
head = funcname(task[0])
tail = ')'
args = task[1:]
kwargs = {}
if args or kwargs:
label_size2 = int((label_size - len(head) - len(tail)) //
(len(args) + len(kwargs)))
pprint = lambda t: pprint_task(t, keys, label_size2)
if args:
if label_size2 > 5:
args = ', '.join(pprint(t) for t in args)
else:
args = '...'
else:
args = ''
if kwargs:
if label_size2 > 5:
kwargs = ', ' + ', '.join('{0}={1}'.format(k, pprint(v))
for k, v in sorted(kwargs.items()))
else:
kwargs = ', ...'
else:
kwargs = ''
return '{0}({1}{2}{3}'.format(head, args, kwargs, tail)
elif isinstance(task, list):
if not task:
return '[]'
elif len(task) > 3:
result = pprint_task(task[:3], keys, label_size)
return result[:-1] + ', ...]'
else:
label_size2 = int((label_size - 2 - 2 * len(task)) // len(task))
args = ', '.join(pprint_task(t, keys, label_size2) for t in task)
return '[{0}]'.format(args)
else:
try:
if task in keys:
return '_'
else:
return '*'
except TypeError:
return '*'
def get_colors(palette, funcs):
"""Get a dict mapping funcs to colors from palette.
Parameters
----------
palette : string
Name of the bokeh palette to use, must be a member of
bokeh.palettes.all_palettes.
funcs : iterable
Iterable of function names
"""
palettes = import_required('bokeh.palettes', _BOKEH_MISSING_MSG)
tz = import_required('toolz', _TOOLZ_MISSING_MSG)
unique_funcs = list(sorted(tz.unique(funcs)))
n_funcs = len(unique_funcs)
palette_lookup = palettes.all_palettes[palette]
keys = list(sorted(palette_lookup.keys()))
index = keys[min(bisect_left(keys, n_funcs), len(keys) - 1)]
palette = palette_lookup[index]
# Some bokeh palettes repeat colors, we want just the unique set
palette = list(tz.unique(palette))
if len(palette) > n_funcs:
# Consistently shuffle palette - prevents just using low-range
random.Random(42).shuffle(palette)
color_lookup = dict(zip(unique_funcs, cycle(palette)))
return [color_lookup[n] for n in funcs]
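# Illustrative usage (added comment; 'Viridis' is just one example member of
# bokeh.palettes.all_palettes): get_colors('Viridis', ['add', 'mul', 'add']) returns
# one color per entry, so repeated function names share a color, and distinct names
# get distinct colors as long as the chosen palette has enough unique entries.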
def visualize(profilers, file_path=None, show=True, save=True, **kwargs):
"""Visualize the results of profiling in a bokeh plot.
If multiple profilers are passed in, the plots are stacked vertically.
Parameters
----------
profilers : profiler or list
Profiler or list of profilers.
file_path : string, optional
Name of the plot output file.
show : boolean, optional
If True (default), the plot is opened in a browser.
save : boolean, optional
If True (default), the plot is saved to disk.
**kwargs
Other keyword arguments, passed to bokeh.figure. These will override
all defaults set by visualize.
Returns
-------
The completed bokeh plot object.
"""
bp = import_required('bokeh.plotting', _BOKEH_MISSING_MSG)
from bokeh.io import _state
if not _state._notebook:
file_path = file_path or "profile.html"
bp.output_file(file_path)
if not isinstance(profilers, list):
profilers = [profilers]
figs = [prof._plot(**kwargs) for prof in profilers]
# Stack the plots
if len(figs) == 1:
p = figs[0]
else:
top = figs[0]
for f in figs[1:]:
f.x_range = top.x_range
f.title = None
f.min_border_top = 20
f.plot_height -= 30
for f in figs[:-1]:
f.xaxis.axis_label = None
f.min_border_bottom = 20
f.plot_height -= 30
for f in figs:
f.min_border_left = 75
f.min_border_right = 75
p = bp.gridplot([[f] for f in figs])
if show:
bp.show(p)
if file_path and save:
bp.save(p)
return p
def _get_figure_keywords():
bp = import_required('bokeh.plotting', _BOKEH_MISSING_MSG)
o = bp.Figure.properties()
o.add('tools')
return o
def plot_tasks(results, dsk, palette='Viridis', label_size=60, **kwargs):
"""Visualize the results of profiling in a bokeh plot.
Parameters
----------
results : sequence
Output of Profiler.results
dsk : dict
The dask graph being profiled.
palette : string, optional
Name of the bokeh palette to use, must be a member of
bokeh.palettes.all_palettes.
label_size: int (optional)
Maximum size of output labels in plot, defaults to 60
**kwargs
Other keyword arguments, passed to bokeh.figure. These will override
all defaults set by visualize.
Returns
-------
The completed bokeh plot object.
"""
bp = import_required('bokeh.plotting', _BOKEH_MISSING_MSG)
from bokeh.models import HoverTool
tz = import_required('toolz', _TOOLZ_MISSING_MSG)
defaults = dict(title="Profile Results",
tools="hover,save,reset,resize,xwheel_zoom,xpan",
plot_width=800, plot_height=300)
defaults.update((k, v) for (k, v) in kwargs.items() if k in
_get_figure_keywords())
if results:
keys, tasks, starts, ends, ids = zip(*results)
id_group = tz.groupby(itemgetter(4), results)
timings = dict((k, [i.end_time - i.start_time for i in v]) for (k, v) in
id_group.items())
id_lk = dict((t[0], n) for (n, t) in enumerate(sorted(timings.items(),
key=itemgetter(1), reverse=True)))
left = min(starts)
right = max(ends)
p = bp.figure(y_range=[str(i) for i in range(len(id_lk))],
x_range=[0, right - left], **defaults)
data = {}
data['width'] = width = [e - s for (s, e) in zip(starts, ends)]
data['x'] = [w / 2 + s - left for (w, s) in zip(width, starts)]
data['y'] = [id_lk[i] + 1 for i in ids]
data['function'] = funcs = [pprint_task(i, dsk, label_size) for i in tasks]
data['color'] = get_colors(palette, funcs)
data['key'] = [str(i) for i in keys]
source = bp.ColumnDataSource(data=data)
p.rect(source=source, x='x', y='y', height=1, width='width',
color='color', line_color='gray')
else:
p = bp.figure(y_range=[str(i) for i in range(8)], x_range=[0, 10],
**defaults)
p.grid.grid_line_color = None
p.axis.axis_line_color = None
p.axis.major_tick_line_color = None
p.yaxis.axis_label = "Worker ID"
p.xaxis.axis_label = "Time (s)"
hover = p.select(HoverTool)
hover.tooltips = """
<div>
<span style="font-size: 14px; font-weight: bold;">Key:</span>
<span style="font-size: 10px; font-family: Monaco, monospace;">@key</span>
</div>
<div>
<span style="font-size: 14px; font-weight: bold;">Task:</span>
<span style="font-size: 10px; font-family: Monaco, monospace;">@function</span>
</div>
"""
hover.point_policy = 'follow_mouse'
return p
def plot_resources(results, palette='Viridis', **kwargs):
"""Plot resource usage in a bokeh plot.
Parameters
----------
results : sequence
Output of ResourceProfiler.results
palette : string, optional
Name of the bokeh palette to use, must be a member of
bokeh.palettes.all_palettes.
**kwargs
Other keyword arguments, passed to bokeh.figure. These will override
all defaults set by plot_resources.
Returns
-------
The completed bokeh plot object.
"""
bp = import_required('bokeh.plotting', _BOKEH_MISSING_MSG)
from bokeh import palettes
from bokeh.models import LinearAxis, Range1d
defaults = dict(title="Profile Results",
tools="save,reset,resize,xwheel_zoom,xpan",
plot_width=800, plot_height=300)
defaults.update((k, v) for (k, v) in kwargs.items() if k in
_get_figure_keywords())
if results:
t, mem, cpu = zip(*results)
left, right = min(t), max(t)
t = [i - left for i in t]
p = bp.figure(y_range=(0, max(cpu)), x_range=(0, right - left), **defaults)
else:
t = mem = cpu = []
p = bp.figure(y_range=(0, 100), x_range=(0, 10), **defaults)
colors = palettes.all_palettes[palette][6]
p.line(t, cpu, color=colors[0], line_width=4, legend='% CPU')
p.yaxis.axis_label = "% CPU"
p.extra_y_ranges = {'memory': Range1d(start=(min(mem) if mem else 0),
end=(max(mem) if mem else 100))}
p.line(t, mem, color=colors[2], y_range_name='memory', line_width=4,
legend='Memory')
p.add_layout(LinearAxis(y_range_name='memory', axis_label='Memory (MB)'),
'right')
p.xaxis.axis_label = "Time (s)"
return p
def plot_cache(results, dsk, start_time, metric_name, palette='Viridis',
label_size=60, **kwargs):
"""Visualize the results of profiling in a bokeh plot.
Parameters
----------
results : sequence
Output of CacheProfiler.results
dsk : dict
The dask graph being profiled.
start_time : float
Start time of the profile.
metric_name : string
Metric used to measure cache size
palette : string, optional
Name of the bokeh palette to use, must be a member of
bokeh.palettes.all_palettes.
label_size: int (optional)
Maximum size of output labels in plot, defaults to 60
**kwargs
Other keyword arguments, passed to bokeh.figure. These will override
all defaults set by visualize.
Returns
-------
The completed bokeh plot object.
"""
bp = import_required('bokeh.plotting', _BOKEH_MISSING_MSG)
from bokeh.models import HoverTool
tz = import_required('toolz', _TOOLZ_MISSING_MSG)
defaults = dict(title="Profile Results",
tools="hover,save,reset,resize,wheel_zoom,xpan",
plot_width=800, plot_height=300)
defaults.update((k, v) for (k, v) in kwargs.items() if k in
_get_figure_keywords())
if results:
starts, ends = list(zip(*results))[3:]
tics = list(sorted(tz.unique(starts + ends)))
groups = tz.groupby(lambda d: pprint_task(d[1], dsk, label_size), results)
data = {}
for k, vals in groups.items():
cnts = dict.fromkeys(tics, 0)
for v in vals:
cnts[v.cache_time] += v.metric
cnts[v.free_time] -= v.metric
data[k] = [0] + list(tz.accumulate(add, tz.pluck(1, sorted(cnts.items()))))
tics = [0] + [i - start_time for i in tics]
p = bp.figure(x_range=[0, max(tics)], **defaults)
for (key, val), color in zip(data.items(), get_colors(palette, data.keys())):
p.line('x', 'y', line_color=color, line_width=3,
source=bp.ColumnDataSource({'x': tics, 'y': val,
'label': [key for i in val]}))
else:
p = bp.figure(y_range=[0, 10], x_range=[0, 10], **defaults)
p.yaxis.axis_label = "Cache Size ({0})".format(metric_name)
p.xaxis.axis_label = "Time (s)"
hover = p.select(HoverTool)
hover.tooltips = """
<div>
<span style="font-size: 14px; font-weight: bold;">Task:</span>
<span style="font-size: 10px; font-family: Monaco, monospace;">@label</span>
</div>
"""
return p
| {
"repo_name": "chrisbarber/dask",
"path": "dask/diagnostics/profile_visualize.py",
"copies": "5",
"size": "13994",
"license": "bsd-3-clause",
"hash": 3151739303329082400,
"line_mean": 32.8837772397,
"line_max": 87,
"alpha_frac": 0.5519508361,
"autogenerated": false,
"ratio": 3.602059202059202,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0002948089330145845,
"num_lines": 413
} |
from __future__ import absolute_import, division, print_function
import random
from weakref import WeakKeyDictionary
from .core import CallbackProperty
__all__ = ['ChoiceSeparator', 'SelectionCallbackProperty']
class ChoiceSeparator(str):
pass
class SelectionCallbackProperty(CallbackProperty):
def __init__(self, default_index=0, **kwargs):
super(SelectionCallbackProperty, self).__init__(**kwargs)
self.default_index = default_index
self._choices = WeakKeyDictionary()
self._display = WeakKeyDictionary()
self._force_next_sync = WeakKeyDictionary()
def __set__(self, instance, value):
if value is not None and value not in self._choices.get(instance, ()):
raise ValueError('value {0} is not in valid choices'.format(value))
super(SelectionCallbackProperty, self).__set__(instance, value)
def force_next_sync(self, instance):
self._force_next_sync[instance] = True
def _get_full_info(self, instance):
if self._force_next_sync.get(instance, False):
try:
return self.__get__(instance), random.random()
finally:
self._force_next_sync[instance] = False
else:
return self.__get__(instance), self.get_choices(instance), self.get_choice_labels(instance)
def get_display_func(self, instance):
return self._display.get(instance, None)
def set_display_func(self, instance, display):
self._display[instance] = display
# selection = self.__get__(instance)
# self.notify(instance, selection, selection)
def get_choices(self, instance):
return self._choices.get(instance, ())
def get_choice_labels(self, instance):
display = self._display.get(instance, str)
labels = []
for choice in self.get_choices(instance):
if isinstance(choice, ChoiceSeparator):
labels.append(str(choice))
else:
labels.append(display(choice))
return labels
def set_choices(self, instance, choices):
self._choices[instance] = choices
self._choices_updated(instance, choices)
selection = self.__get__(instance)
self.notify(instance, selection, selection)
def _choices_updated(self, instance, choices):
if not choices:
self.__set__(instance, None)
return
selection = self.__get__(instance)
        # We do the following because 'selection in choices' actually compares
# equality not identity (and we really just care about identity here)
for choice in choices:
if selection is choice:
return
choices_without_separators = [choice for choice in choices
if not isinstance(choice, ChoiceSeparator)]
if choices_without_separators:
try:
selection = choices_without_separators[self.default_index]
except IndexError:
if self.default_index > 0:
selection = choices_without_separators[-1]
else:
selection = choices_without_separators[0]
else:
selection = None
self.__set__(instance, selection)
| {
"repo_name": "stscieisenhamer/glue",
"path": "glue/external/echo/selection.py",
"copies": "1",
"size": "3306",
"license": "bsd-3-clause",
"hash": -6064434104924679000,
"line_mean": 33.4375,
"line_max": 103,
"alpha_frac": 0.610707804,
"autogenerated": false,
"ratio": 4.598052851182198,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5708760655182198,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import random
import io
import stripe
class MultipartDataGenerator(object):
def __init__(self, chunk_size=1028):
self.data = io.BytesIO()
self.line_break = "\r\n"
self.boundary = self._initialize_boundary()
self.chunk_size = chunk_size
def add_params(self, params):
# Flatten parameters first
params = dict(stripe.api_requestor._api_encode(params))
for key, value in stripe.six.iteritems(params):
if value is None:
continue
self._write(self.param_header())
self._write(self.line_break)
if hasattr(value, "read"):
filename = "blob"
if hasattr(value, "name"):
# Convert the filename to string, just in case it's not
# already one. E.g. `tempfile.TemporaryFile` has a `name`
# attribute but it's an `int`.
filename = stripe.six.text_type(value.name)
self._write('Content-Disposition: form-data; name="')
self._write(key)
self._write('"; filename="')
self._write(filename)
self._write('"')
self._write(self.line_break)
self._write("Content-Type: application/octet-stream")
self._write(self.line_break)
self._write(self.line_break)
self._write_file(value)
else:
self._write('Content-Disposition: form-data; name="')
self._write(key)
self._write('"')
self._write(self.line_break)
self._write(self.line_break)
self._write(str(value))
self._write(self.line_break)
def param_header(self):
return "--%s" % self.boundary
def get_post_data(self):
self._write("--%s--" % (self.boundary,))
self._write(self.line_break)
return self.data.getvalue()
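    # Illustrative sketch of the payload produced above (added comment, not part of the
    # original module). For a single plain parameter {'name': 'value'} the body looks
    # roughly like:
    #   --<boundary>\r\n
    #   Content-Disposition: form-data; name="name"\r\n
    #   \r\n
    #   value\r\n
    #   --<boundary>--\r\n
    # where <boundary> is the random integer chosen by _initialize_boundary().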
def _write(self, value):
if isinstance(value, stripe.six.binary_type):
array = bytearray(value)
elif isinstance(value, stripe.six.text_type):
array = bytearray(value, encoding="utf-8")
else:
raise TypeError(
"unexpected type: {value_type}".format(value_type=type(value))
)
self.data.write(array)
def _write_file(self, f):
while True:
file_contents = f.read(self.chunk_size)
if not file_contents:
break
self._write(file_contents)
def _initialize_boundary(self):
return random.randint(0, 2 ** 63)
| {
"repo_name": "stripe/stripe-python",
"path": "stripe/multipart_data_generator.py",
"copies": "1",
"size": "2745",
"license": "mit",
"hash": 3818181858686663000,
"line_mean": 32.0722891566,
"line_max": 78,
"alpha_frac": 0.5322404372,
"autogenerated": false,
"ratio": 4.178082191780822,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5210322628980821,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import random
import threading
import tempfile
import logging
import numpy as np
import tensorflow as tf
from tensorflow.contrib import layers, learn
import gym
from deep_rl.agents import A3CAgent
from deep_rl.graphs import create_a3c_graph
from deep_rl.trajectories import discount, sample_traj
from deep_rl.envs import EnvWrapper
from deep_rl.misc import first_in_collection, categorical_sample
from six.moves import range
flags = tf.app.flags
FLAGS = flags.FLAGS
flags.DEFINE_string("name", "CartPole-v0", "Name of the environment to train/play")
flags.DEFINE_float("gamma", 0.99, "Discount rate")
flags.DEFINE_float("learning_rate", 0.001, "Learning rate")
flags.DEFINE_float('beta', 0.01, "Beta regularization term for A3C")
flags.DEFINE_integer("a3c_update_interval", 4, "Number of timesteps before updating the actor-critic model")
flags.DEFINE_integer("save_model_interval", 120, "Interval to save model (seconds)")
flags.DEFINE_integer("save_summaries_interval", 120, "Interval to save summaries (seconds)")
flags.DEFINE_integer("num_threads", 1, "Number of threads or environments to explore concurrently")
flags.DEFINE_integer("exploration_steps", 500000, "Number of steps with a decaying epsilon")
flags.DEFINE_integer("total_steps", 1250000, "Total steps")
flags.DEFINE_integer("seed", 0, "Random seed")
flags.DEFINE_string("outdir", "", "Prefix for monitoring, summary and checkpoint directories")
flags.DEFINE_bool("render", False, "Render environment during training")
def simple_nn(states, hidden_sizes):
return learn.ops.dnn(states, hidden_sizes, activation=tf.nn.tanh)
outdir = FLAGS.outdir
if outdir == "":
outdir = tempfile.mkdtemp()
print(outdir)
monitor_env = EnvWrapper(FLAGS.name)
monitor_dir = outdir + '/monitor'
n_action = monitor_env.action_space.n
input_shape = (None,) + monitor_env.observation_space.shape
np.random.seed(FLAGS.seed)
tf.set_random_seed(FLAGS.seed)
random.seed(FLAGS.seed)
monitor_env.seed(FLAGS.seed)
gym.logger.setLevel(logging.WARN)
print("Input shape {}".format(input_shape))
print("Number of Actions {}".format(n_action))
def main(_):
g = tf.Graph()
with g.as_default(), tf.device('/cpu:0'):
opt = tf.train.AdamOptimizer(learning_rate=FLAGS.learning_rate)
hiddens = [64, 64]
def model(states):
return simple_nn(states, hiddens)
create_a3c_graph(input_shape, n_action, model, opt, beta=FLAGS.beta)
T = tf.Variable(0, trainable=False)
tf.add_to_collection("global_step", T)
agent = A3CAgent(g, FLAGS.exploration_steps, FLAGS.total_steps, FLAGS.gamma, FLAGS.a3c_update_interval, categorical_sample)
sv = tf.train.Supervisor(g,
logdir=outdir,
global_step=T,
save_model_secs=FLAGS.save_model_interval,
save_summaries_secs=FLAGS.save_summaries_interval)
with sv.managed_session() as sess:
try:
coord = sv.coord
envs = [EnvWrapper(FLAGS.name) for _ in range(FLAGS.num_threads - 1)]
envs.insert(0, monitor_env)
for e in envs:
e.monitor.start(monitor_dir, resume=True, video_callable=False)
threads = [threading.Thread(target=agent.run,
args=(i, sess, coord, envs[i]))
for i in range(FLAGS.num_threads)]
for t in threads:
t.start()
coord.join(threads)
except Exception as e:
print("Error training model ...")
print(e)
if __name__ == "__main__":
tf.app.run()
| {
"repo_name": "domluna/deep_rl",
"path": "examples/run_a3c.py",
"copies": "1",
"size": "3805",
"license": "mit",
"hash": 8206693651668566000,
"line_mean": 35.2380952381,
"line_max": 131,
"alpha_frac": 0.6509855453,
"autogenerated": false,
"ratio": 3.6977648202138,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9840972117625044,
"avg_score": 0.0015556495777509626,
"num_lines": 105
} |
from __future__ import absolute_import, division, print_function
import random
import time
from workflows.services.common_service import CommonService
class SampleTxn(CommonService):
'''An example service building on top of the workflow.services architecture,
demonstrating how this architecture can be used.
    This service consumes a message off one queue and places it into another.
Transactions are used to guarantee correct message handling.
The service is deliberately unreliable and prone to failure.'''
# Human readable service name
_service_name = "Transaction sample"
def initializing(self):
'''Subscribe to a channel. Received messages must be acknowledged.'''
self.subid = self._transport.subscribe('transient.transaction', self.receive_message, acknowledgement=True)
@staticmethod
def crashpoint():
'''Return true if the service should malfunction at this point.'''
# Probability of not crashing is 90%
return random.uniform(0, 1) > 0.90
def receive_message(self, header, message):
'''Receive a message'''
print("=== Receive ===")
print(header)
print(message)
print("MsgID: {0}".format(header['message-id']))
assert header['message-id']
txn = self._transport.transaction_begin()
print(" 1. Txn: {0}".format(str(txn)))
if self.crashpoint():
self._transport.transaction_abort(txn)
print("--- Abort ---")
return
self._transport.ack(header['message-id'], self.subid, transaction=txn)
print(" 2. Ack")
if self.crashpoint():
self._transport.transaction_abort(txn)
print("--- Abort ---")
return
self._transport.send('transient.destination', message, transaction=txn)
print(" 3. Send")
if self.crashpoint():
self._transport.transaction_abort(txn)
print("--- Abort ---")
return
self._transport.transaction_commit(txn)
print(" 4. Commit")
print("=== Done ===")
class SampleTxnProducer(CommonService):
'''An example service building on top of the workflow.services architecture,
demonstrating how this architecture can be used.
This service generates messages for the Transaction example.'''
# Human readable service name
_service_name = "TXN Message Producer"
counter = 0
def initializing(self):
'''Service initialization. This function is run before any commands are
received from the frontend. This is the place to request channel
subscriptions with the messaging layer, and register callbacks.
This function can be overridden by specific service implementations.'''
self._register_idle(3, self.create_message)
def create_message(self):
'''Create and send a unique message for this service.'''
self.counter += 1
self._transport.send("transient.transaction",
"TXMessage #%d\n++++++++Produced@ %f" % (
self.counter,
(time.time() % 1000) * 1000
))
self.log.info("Created message %d", self.counter)
| {
"repo_name": "xia2/workflows",
"path": "workflows/services/sample_transaction.py",
"copies": "1",
"size": "3078",
"license": "bsd-3-clause",
"hash": 2098202327255600400,
"line_mean": 33.2,
"line_max": 111,
"alpha_frac": 0.6656920078,
"autogenerated": false,
"ratio": 4.298882681564246,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.009894905023924887,
"num_lines": 90
} |
from __future__ import absolute_import, division, print_function
import random
import numpy as np
import tensorflow as tf
from tensorflow.contrib.rnn.python.ops import core_rnn
from tensorflow.contrib.rnn.python.ops import core_rnn_cell
def build_data(n):
xs = []
ys = []
for i in range(0, 2000):
k = random.uniform(1, 50)
x = [[np.sin(k + j)] for j in range(0, n)]
y = [np.sin(k + n)]
# x[i] = sin(k + i) (i = 0, 1, ..., n-1)
# y[i] = sin(k + n)
xs.append(x)
ys.append(y)
train_x = np.array(xs[0: 1500])
train_y = np.array(ys[0: 1500])
test_x = np.array(xs[1500:])
test_y = np.array(ys[1500:])
return (train_x, train_y, test_x, test_y)
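# Illustrative note (added; not part of the original script): with n = 10 and the
# 2000-sample split above, the returned arrays have shapes
#   train_x: (1500, 10, 1)   train_y: (1500, 1)
#   test_x:  (500, 10, 1)    test_y:  (500, 1)
# i.e. each sample is a length-n window of sine values and its target is the next value.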
length = 10
time_step_size = length
vector_size = 1
batch_size = 10
test_size = 10
# build data
(train_x, train_y, test_x, test_y) = build_data(length)
print(train_x.shape, train_y.shape, test_x.shape, test_y.shape)
X = tf.placeholder("float", [None, length, vector_size])
Y = tf.placeholder("float", [None, 1])
# weights and bias mapping the final LSTM output (lstm_size = 10) to the predicted value
W = tf.Variable(tf.random_normal([10, 1], stddev=0.01))
B = tf.Variable(tf.random_normal([1], stddev=0.01))
def seq_predict_model(X, w, b, time_step_size, vector_size):
# input X shape: [batch_size, time_step_size, vector_size]
# transpose X to [time_step_size, batch_size, vector_size]
X = tf.transpose(X, [1, 0, 2])
# reshape X to [time_step_size * batch_size, vector_size]
X = tf.reshape(X, [-1, vector_size])
# split X, array[time_step_size], shape: [batch_size, vector_size]
X = tf.split(X, time_step_size, 0)
# LSTM model with state_size = 10
cell = core_rnn_cell.BasicLSTMCell(num_units=10,
forget_bias=1.0,
state_is_tuple=True)
outputs, _states = core_rnn.static_rnn(cell, X, dtype=tf.float32)
# Linear activation
return tf.matmul(outputs[-1], w) + b, cell.state_size
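# Shape sketch for the model above (added comment; assumes the defaults used in this
# script): after the transpose/reshape/split, X becomes a list of `time_step_size`
# tensors of shape [batch_size, vector_size]; static_rnn returns one output per step,
# so outputs[-1] has shape [batch_size, 10] and matmul with w ([10, 1]) plus b yields
# the [batch_size, 1] prediction.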
pred_y, _ = seq_predict_model(X, W, B, time_step_size, vector_size)
loss = tf.square(tf.subtract(Y, pred_y))
train_op = tf.train.GradientDescentOptimizer(0.001).minimize(loss)
with tf.Session() as sess:
tf.global_variables_initializer().run()
writer = tf.summary.FileWriter('./log', sess.graph)
# train
for i in range(50):
# train
for end in range(batch_size, len(train_x), batch_size):
begin = end - batch_size
x_value = train_x[begin: end]
y_value = train_y[begin: end]
sess.run(train_op, feed_dict={X: x_value, Y: y_value})
# randomly select validation set from test set
test_indices = np.arange(len(test_x))
np.random.shuffle(test_indices)
test_indices = test_indices[0: test_size]
x_value = test_x[test_indices]
y_value = test_y[test_indices]
# eval in validation set
val_loss = np.mean(sess.run(loss,
feed_dict={X: x_value, Y: y_value}))
print('Run %s' % i, val_loss)
for b in range(0, len(test_x), test_size):
x_value = test_x[b: b + test_size]
y_value = test_y[b: b + test_size]
pred = sess.run(pred_y, feed_dict={X: x_value})
for i in range(len(pred)):
print(pred[i], y_value[i], pred[i] - y_value[i])
| {
"repo_name": "DeepVisionTeam/TensorFlowBook",
"path": "rnn/sin/lstm_sin.py",
"copies": "2",
"size": "3352",
"license": "apache-2.0",
"hash": 2088985318783766300,
"line_mean": 31.5436893204,
"line_max": 72,
"alpha_frac": 0.5868138425,
"autogenerated": false,
"ratio": 2.9769094138543517,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45637232563543517,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import random
import numpy
# Various pre-crafted datasets/variables for testing
# !!! Must not be changed -- only appended !!!
# while testing numpy we better not rely on numpy to produce random
# sequences
random.seed(1)
# but will seed it nevertheless
numpy.random.seed(1)
nx, ny = 1000, 1000
# reduced squares based on indexes_rand, primarily for testing more
# time-consuming functions (ufunc, linalg, etc)
nxs, nys = 100, 100
# a set of interesting types to test
TYPES1 = [
'int16', 'float16',
'int32', 'float32',
'int64', 'float64', 'complex64',
'longfloat', 'complex128',
]
if 'complex256' in numpy.typeDict:
TYPES1.append('complex256')
def memoize(func):
result = []
def wrapper():
if not result:
result.append(func())
return result[0]
return wrapper
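# Note added for clarity: the decorator above caches the first call's return value in
# the closed-over `result` list, so e.g. get_values() builds its array once and every
# later call returns that same object.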
# values which will be used to construct our sample data matrices
# replicate 10 times to speed up initial imports of this helper
# and generate some redundancy
@memoize
def get_values():
rnd = numpy.random.RandomState(1)
values = numpy.tile(rnd.uniform(0, 100, size=nx * ny // 10), 10)
return values
@memoize
def get_squares():
values = get_values()
squares = {t: numpy.array(values,
dtype=getattr(numpy, t)).reshape((nx, ny))
for t in TYPES1}
    # adjust complex ones to have a non-degenerate imaginary part -- use
# original data transposed for that
for t, v in squares.items():
if t.startswith('complex'):
v += v.T * 1j
return squares
@memoize
def get_squares_():
# smaller squares
squares_ = {t: s[:nxs, :nys] for t, s in get_squares().items()}
return squares_
@memoize
def get_vectors():
# vectors
vectors = {t: s[0] for t, s in get_squares().items()}
return vectors
@memoize
def get_indexes():
indexes = list(range(nx))
# so we do not have all items
indexes.pop(5)
indexes.pop(95)
indexes = numpy.array(indexes)
return indexes
@memoize
def get_indexes_rand():
rnd = random.Random(1)
indexes_rand = get_indexes().tolist() # copy
rnd.shuffle(indexes_rand) # in-place shuffle
indexes_rand = numpy.array(indexes_rand)
return indexes_rand
@memoize
def get_indexes_():
# smaller versions
indexes = get_indexes()
indexes_ = indexes[indexes < nxs]
return indexes_
@memoize
def get_indexes_rand_():
indexes_rand = get_indexes_rand()
indexes_rand_ = indexes_rand[indexes_rand < nxs]
return indexes_rand_
class Benchmark(object):
goal_time = 0.25
| {
"repo_name": "DailyActie/Surrogate-Model",
"path": "01-codes/numpy-master/benchmarks/benchmarks/common.py",
"copies": "1",
"size": "2655",
"license": "mit",
"hash": 646335982683305500,
"line_mean": 21.3109243697,
"line_max": 72,
"alpha_frac": 0.6485875706,
"autogenerated": false,
"ratio": 3.5165562913907285,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4665143861990728,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import random
import tensorflow as tf
import gym
from deep_rl.graphs import create_a3c_graph
from deep_rl.misc import first_in_collection
from deep_rl.trajectories import discount
from six.moves import range
EPSILON_ENDS = [0.1, 0.01, 0.5]
class A3CAgent:
"""A3CAgent"""
def __init__(self,
graph,
exploration_steps,
total_steps,
gamma,
a3c_update_interval,
action_sampler):
"""
graph should have the placeholders called "states", "actions",
and "returns". It should also have operations called "loss_op", "train_op",
"probs", and "value".
"""
self.graph = graph
self.gamma = gamma
self.a3c_update_interval = a3c_update_interval
self.action_sampler = action_sampler
self.T = graph.get_collection("global_step")[0]
self.exploration_steps = exploration_steps
self.total_steps = total_steps
self.incr_T = tf.assign_add(self.T, 1)
def pick_epsilon(self, T):
rv = random.random()
if rv < 0.4:
end = EPSILON_ENDS[0]
elif rv < 0.7:
end = EPSILON_ENDS[1]
else:
end = EPSILON_ENDS[2]
if T > self.exploration_steps:
return end
return 1.0 - T * (1.0 - end) / self.exploration_steps
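    # Worked example of the schedule above (added comment; the numbers are only an
    # illustration): with exploration_steps = 500000 and end = 0.1, epsilon decays
    # linearly from 1.0 at T = 0 to 0.1 at T = 500000 (e.g. T = 250000 gives
    # 1.0 - 250000 * 0.9 / 500000 = 0.55); past exploration_steps it stays at `end`.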
def run(self, t_id, session, coord, env):
t = 0
t_start = 0 # for updating params
        t_ep = 0 # for checking if an episode is done
ep_reward = 0
actions = []
states = []
rewards = []
# inputs and ops
_actions = self.graph.get_collection("actions")[0]
_returns = self.graph.get_collection("returns")[0]
pol_in = self.graph.get_collection("policy_in")[0]
pol_out = self.graph.get_collection("policy_out")[0]
pol_train_op = self.graph.get_collection("policy_train_op")[0]
val_in = self.graph.get_collection("value_in")[0]
val_out = self.graph.get_collection("value_out")[0]
val_train_op = self.graph.get_collection("value_train_op")[0]
state = env.reset()
try:
while not coord.should_stop():
T = session.run(self.T)
if T > self.total_steps:
break
epsilon = self.pick_epsilon(T)
if random.random() < epsilon:
action = env.action_space.sample()
else:
probs = session.run(pol_out, feed_dict={pol_in: state.reshape(1, *state.shape)})
action = self.action_sampler(probs)[0]
next_state, reward, done, info = env.step(action)
states.append(state)
actions.append(action)
rewards.append(reward)
ep_reward += reward
t += 1
session.run(self.incr_T)
# update params
if done or t - t_start == self.a3c_update_interval:
last_state = states[-1]
val = 0
if not done:
val = session.run(val_out,
feed_dict={val_in:
last_state.reshape(1, *last_state.shape)})
rewards.append(val)
returns = discount(rewards, self.gamma)[:-1]
session.run([val_train_op, pol_train_op],
feed_dict={val_in: states,
pol_in: states,
_returns: returns,
_actions: actions})
actions = []
states = []
rewards = []
t_start = t
# TODO: see if we can monitor all the envs
if done or t - t_ep == env.spec.timestep_limit:
state = env.reset()
print("Thread id {}: Episode reward = {}, timestep = {}".format(t_id, ep_reward,
t))
ep_reward = 0
t_ep = t
else:
state = next_state
except Exception as e:
coord.request_stop(e)
| {
"repo_name": "domluna/deep_rl",
"path": "deep_rl/agents/a3c.py",
"copies": "1",
"size": "4504",
"license": "mit",
"hash": -5563279794707713000,
"line_mean": 32.8646616541,
"line_max": 100,
"alpha_frac": 0.4735790409,
"autogenerated": false,
"ratio": 4.237064910630291,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5210643951530292,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import recordlinkage as rl
from recordlinkage.datasets import load_febrl1, load_febrl4
class CompareRecordLinkage(object):
timeout = 30 * 60
def setup(self):
# download data
self.A, self.B = load_febrl4()
# make pairs
c_pairs = rl.FullIndex()
pairs = c_pairs.index(self.A, self.B)
# different sizes of pairs
        # use integer slice bounds; float literals like 5e3 are rejected by newer pandas
        self.pairs_xsmall = pairs[0:5000]
        self.pairs_small = pairs[0:50000]
        self.pairs_medium = pairs[0:500000]
        self.pairs_large = pairs[0:5000000]
def time_global_xsmall(self):
c_compare = rl.Compare(self.pairs_xsmall, self.A, self.B)
c_compare.string('given_name', 'given_name', method='jaro')
c_compare.string('surname', 'surname',
method='jarowinkler', threshold=0.85)
c_compare.date('date_of_birth', 'date_of_birth')
c_compare.exact('suburb', 'suburb')
c_compare.exact('state', 'state')
c_compare.string('address_1', 'address_1',
method='levenshtein', threshold=0.85)
def time_global_small(self):
c_compare = rl.Compare(self.pairs_small, self.A, self.B)
c_compare.string('given_name', 'given_name', method='jaro')
c_compare.string('surname', 'surname',
method='jarowinkler', threshold=0.85)
c_compare.date('date_of_birth', 'date_of_birth')
c_compare.exact('suburb', 'suburb')
c_compare.exact('state', 'state')
c_compare.string('address_1', 'address_1',
method='levenshtein', threshold=0.85)
def time_global_medium(self):
c_compare = rl.Compare(self.pairs_medium, self.A, self.B)
c_compare.string('given_name', 'given_name', method='jaro')
c_compare.string('surname', 'surname',
method='jarowinkler', threshold=0.85)
c_compare.date('date_of_birth', 'date_of_birth')
c_compare.exact('suburb', 'suburb')
c_compare.exact('state', 'state')
c_compare.string('address_1', 'address_1',
method='levenshtein', threshold=0.85)
def time_global_large(self):
c_compare = rl.Compare(self.pairs_large, self.A, self.B)
c_compare.string('given_name', 'given_name', method='jaro')
c_compare.string('surname', 'surname',
method='jarowinkler', threshold=0.85)
c_compare.date('date_of_birth', 'date_of_birth')
c_compare.exact('suburb', 'suburb')
c_compare.exact('state', 'state')
c_compare.string('address_1', 'address_1',
method='levenshtein', threshold=0.85)
class CompareDeduplication(object):
timeout = 30 * 60
def setup(self):
# download data
self.A = load_febrl1()
# make pairs
c_pairs = rl.FullIndex()
pairs = c_pairs.index(self.A)
# different sizes of pairs
        # use integer slice bounds; float literals like 5e3 are rejected by newer pandas
        self.pairs_xsmall = pairs[0:5000]
        self.pairs_small = pairs[0:50000]
        self.pairs_medium = pairs[0:500000]
        self.pairs_large = pairs[0:5000000]
def time_global_xsmall(self):
c_compare = rl.Compare(self.pairs_xsmall, self.A)
c_compare.string('given_name', 'given_name', method='jaro')
c_compare.string('surname', 'surname',
method='jarowinkler', threshold=0.85)
c_compare.date('date_of_birth', 'date_of_birth')
c_compare.exact('suburb', 'suburb')
c_compare.exact('state', 'state')
c_compare.string('address_1', 'address_1',
method='levenshtein', threshold=0.85)
def time_global_small(self):
c_compare = rl.Compare(self.pairs_small, self.A)
c_compare.string('given_name', 'given_name', method='jaro')
c_compare.string('surname', 'surname',
method='jarowinkler', threshold=0.85)
c_compare.date('date_of_birth', 'date_of_birth')
c_compare.exact('suburb', 'suburb')
c_compare.exact('state', 'state')
c_compare.string('address_1', 'address_1',
method='levenshtein', threshold=0.85)
def time_global_medium(self):
c_compare = rl.Compare(self.pairs_medium, self.A)
c_compare.string('given_name', 'given_name', method='jaro')
c_compare.string('surname', 'surname',
method='jarowinkler', threshold=0.85)
c_compare.date('date_of_birth', 'date_of_birth')
c_compare.exact('suburb', 'suburb')
c_compare.exact('state', 'state')
c_compare.string('address_1', 'address_1',
method='levenshtein', threshold=0.85)
def time_global_large(self):
c_compare = rl.Compare(self.pairs_large, self.A)
c_compare.string('given_name', 'given_name', method='jaro')
c_compare.string('surname', 'surname',
method='jarowinkler', threshold=0.85)
c_compare.date('date_of_birth', 'date_of_birth')
c_compare.exact('suburb', 'suburb')
c_compare.exact('state', 'state')
c_compare.string('address_1', 'address_1',
method='levenshtein', threshold=0.85)
class CompareAlgorithms(object):
timeout = 30 * 60
def setup(self):
# download data
self.A, self.B = load_febrl4()
# Add numbers (age)
self.A['postcode'] = self.A['postcode'].astype(float)
self.B['postcode'] = self.B['postcode'].astype(float)
# make pairs
c_pairs = rl.FullIndex()
        self.pairs = c_pairs.index(self.A, self.B)[0:50000]  # integer bound instead of 5e4
# ************* STRING *************
def time_string_jaro(self):
c_compare = rl.Compare(self.pairs, self.A, self.B)
c_compare.string('given_name', 'given_name', method='jaro')
def time_string_jarowinkler(self):
c_compare = rl.Compare(self.pairs, self.A, self.B)
c_compare.string('given_name', 'given_name', method='jarowinkler')
def time_string_qgram(self):
c_compare = rl.Compare(self.pairs, self.A, self.B)
c_compare.string('given_name', 'given_name', method='qgram')
def time_string_cosine(self):
c_compare = rl.Compare(self.pairs, self.A, self.B)
c_compare.string('given_name', 'given_name', method='cosine')
def time_string_levenshtein(self):
c_compare = rl.Compare(self.pairs, self.A, self.B)
c_compare.string('given_name', 'given_name', method='levenshtein')
# ************* Exact *************
def time_exact(self):
c_compare = rl.Compare(self.pairs, self.A, self.B)
c_compare.exact('state', 'state')
# ************* NUMERIC *************
def time_numeric_gauss(self):
c_compare = rl.Compare(self.pairs, self.A, self.B)
c_compare.numeric('age', 'age', method='gauss', scale=2)
| {
"repo_name": "J535D165/recordlinkage",
"path": "benchmarks/bench_comparing.py",
"copies": "1",
"size": "6937",
"license": "bsd-3-clause",
"hash": 4714079973345498000,
"line_mean": 33.8592964824,
"line_max": 74,
"alpha_frac": 0.574023353,
"autogenerated": false,
"ratio": 3.347972972972973,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4421996325972973,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import recordlinkage as rl
from recordlinkage.datasets import load_febrl1, load_febrl4
class PairsRecordLinkage(object):
timeout = 30 * 60
def setup(self):
# download data
self.A, self.B = load_febrl4()
def time_full_index(self):
# setup class
c_pairs = rl.FullIndex()
# Make pairs
c_pairs.index(self.A, self.B)
def time_block_index(self):
# setup class
c_pairs = rl.BlockIndex('given_name')
# Make pairs
c_pairs.index(self.A, self.B)
def time_sni_index(self):
# setup class
c_pairs = rl.SortedNeighbourhoodIndex(on='given_name', w=5)
# Make pairs
c_pairs.index(self.A, self.B)
def time_random_index(self):
# setup class
c_pairs = rl.RandomIndex(2500)
# Make pairs
c_pairs.index(self.A, self.B)
class PairsDeduplication(object):
timeout = 30 * 60
def setup(self):
# download data
self.A = load_febrl1()
def time_full_index(self):
# setup class
c_pairs = rl.FullIndex()
# Make pairs
c_pairs.index(self.A)
def time_block_index(self):
# setup class
c_pairs = rl.BlockIndex('given_name')
# Make pairs
c_pairs.index(self.A)
def time_sni_index(self):
# setup class
c_pairs = rl.SortedNeighbourhoodIndex(on='given_name', w=5)
# Make pairs
c_pairs.index(self.A)
def time_random_index(self):
# setup class
c_pairs = rl.RandomIndex(2500)
# Make pairs
c_pairs.index(self.A)
| {
"repo_name": "J535D165/recordlinkage",
"path": "benchmarks/bench_indexing.py",
"copies": "1",
"size": "1699",
"license": "bsd-3-clause",
"hash": -8827227398262038000,
"line_mean": 18.3068181818,
"line_max": 67,
"alpha_frac": 0.5744555621,
"autogenerated": false,
"ratio": 3.4185110663983904,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.44929666284983905,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import re
from collections import OrderedDict
from imps.stdlib import FUTURE, get_paths, LOCAL, RELATIVE, STDLIB, THIRDPARTY
NOQA = r'.*\s*\#\sNOQA' # won't work if NOQA is inside a triple-quoted string.
PYLINT_IGNORE = r'.*\s*\#\s*pylint:\s*disable\=.*$' # won't work if pylint: disable is inside a triple-quoted string.
FROM_IMPORT_LINE = r'^from\s.*import\s.*'
FROM_IMPORT_PARAN_LINE = r'^from\s.*import\s\(.*'
def sortable_key(s):
s = s.strip()
broken_s = s.split(' ')
results = []
for bs in broken_s:
new_s = ''
for c in bs:
if c.islower():
new_s += '1'
else:
new_s += '0'
results.append(bs.lower() + new_s)
return ' '.join(results)
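# Hypothetical example of the encoding above (added; not in the original module):
# sortable_key('Foo bar') -> 'foo011 bar111', so when two words compare equal after
# lowering, the one containing uppercase letters ('0' digits) sorts first.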
def does_line_have_hash_noqa(line):
return re.match(NOQA, line, re.IGNORECASE)
def does_line_end_in_pylint_ignore(line):
if re.match(PYLINT_IGNORE, line, re.IGNORECASE):
_, post = re.split(r'#\spylint', line, re.IGNORECASE)
if 'F0401' in post or 'E0611' in post:
return True
return False
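# Illustrative behaviour of the check above (added comment): a line such as
# 'import foo # pylint: disable=F0401' returns True, while a pylint disable for any
# other error code (or no disable at all) returns False.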
def _classify_imports(imports, local_imports):
result = OrderedDict()
result[FUTURE] = []
result[STDLIB] = []
result[THIRDPARTY] = []
result[LOCAL] = []
result[RELATIVE] = []
for i in imports:
result[get_paths(i, local_imports)].append(i)
return result
def _get_core_import(imp):
imp = re.sub(r'^from\s+', '', imp)
imp = re.sub(r'^import\s+', '', imp)
return re.sub(r'\s+.*', '', imp)
def _sorter_relative_imports(s):
s = s.replace('.', chr(ord('z') + 1))
s = s.replace('_', chr(ord('A') - 1))
return s.lower()
def _sorter(s):
s = s.replace('.', chr(ord('A') - 2))
s = s.replace('_', chr(ord('A') - 1))
    # We only alphabetically sort the 'from' part of imports written in the style: from X import Y
if re.match(FROM_IMPORT_PARAN_LINE, s):
s = re.sub(r'\#.*\n', '', s)
s = re.sub(r'\s+', ' ', s)
s = sortable_key(s[4:s.find(' import ')]) + ' import' + s[s.find('(') + 1:s.find(')')]
if re.match(FROM_IMPORT_LINE, s):
s = sortable_key(s[4:s.find(' import ')]) + s[s.find(' import '):]
return sortable_key(s)
def _sorter_unify_import_and_from(s):
s = re.sub(r'^from\s+', '', s)
s = re.sub(r'^import\s+', '', s)
return _sorter(s)
def _remove_double_newlines(lines):
i = 0
while i < len(lines) - 1:
if lines[i+1] == lines[i] == '':
lines[i:i+1] = []
else:
i += 1
return lines
def _get_builder_func(s, max_line_length, indent):
if s in ('s', 'smarkets'):
return SmarketsBuilder(max_line_length, indent)
elif s in ('g', 'google'):
return GoogleBuilder(max_line_length, indent)
elif s in ('c', 'crypto', 'cryptography'):
return CryptoBuilder(max_line_length, indent)
else:
raise Exception('Unknown style type %s', s)
class GenericBuilder(object):
def __init__(self, max_line_length, indent):
self.max_line_length = max_line_length
self.indent = indent
def do_all(
self, imports_by_type, from_imports_by_type, lines_before_any_imports, pre_import,
pre_from_import, after_imports
):
output = '\n'.join(lines_before_any_imports)
self.new_import_group = False
for typ in imports_by_type.keys():
if typ == RELATIVE:
continue
new_import_group = self.special_sort(
imports_by_type, from_imports_by_type, typ, pre_import, pre_from_import
)
if new_import_group:
self.new_import_group = True
output += new_import_group + '\n'
output += self._relative_builder_func(from_imports_by_type, pre_from_import)
output = output.strip()
after_imports_str = '\n'.join(after_imports).strip()
result = (output + '\n\n\n' + after_imports_str).strip()
if result:
return result + '\n'
return ''
def _relative_builder_func(self, from_imports, pre_from_import):
output = ""
for imp in sorted(from_imports[RELATIVE], key=_sorter_relative_imports):
output += self._build(imp, pre_from_import[imp])
return output
def _build(self, core_import, pre_imp):
pre_imp = [a for a in pre_imp if a]
output = '\n'.join([''] + pre_imp + [''])
output += self._split_core_import(core_import)
return output
def _split_core_import(self, core_import):
if len(core_import) <= self.max_line_length or does_line_have_hash_noqa(core_import) or (
'(' in core_import and ')' in core_import) or does_line_end_in_pylint_ignore(core_import):
return core_import
        # Turn a long line of imports into a multiline import using parentheses
result = (',\n' + self.indent).join([s.strip() for s in core_import.split(',')])
result = re.sub(r'import\s+', 'import (\n' + self.indent, result)
result += ",\n)"
return result
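    # Sketch of the transformation above (added comment; the example import is
    # hypothetical): with max_line_length exceeded, 'from foo import aaa, bbb' becomes
    #   from foo import (
    #       aaa,
    #       bbb,
    #   )
    # using the builder's configured indent for the wrapped names.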
def special_sort(self, *args):
raise NotImplementedError()
class SmarketsBuilder(GenericBuilder):
def special_sort(self, imports, from_imports, typ, pre_import, pre_from_import):
output = ""
for imp in sorted(imports[typ], key=_sorter):
output += self._build(imp, pre_import[imp])
for imp in sorted(from_imports[typ], key=_sorter):
output += self._build(imp, pre_from_import[imp])
return output
class GoogleBuilder(GenericBuilder):
def special_sort(self, imports, from_imports, typ, pre_import, pre_from_import):
output = ""
for imp in sorted(imports[typ] + from_imports[typ], key=_sorter_unify_import_and_from):
output += self._build(imp, pre_import.get(imp, pre_from_import.get(imp)))
return output
class CryptoBuilder(GenericBuilder):
def special_sort(self, imports, from_imports, typ, pre_import, pre_from_import):
output = ""
if typ in (STDLIB, FUTURE, RELATIVE):
for imp in sorted(imports[typ], key=_sorter):
output += self._build(imp, pre_import[imp])
for imp in sorted(from_imports[typ], key=_sorter):
output += self._build(imp, pre_from_import[imp])
else:
last_imp = ''
for imp in sorted(imports[typ] + from_imports[typ], key=_sorter_unify_import_and_from):
if not last_imp or not _get_core_import(imp).startswith(last_imp):
if last_imp:
if imp in pre_import:
pre_import.get(imp).append('')
if imp in pre_from_import:
pre_from_import.get(imp).append('')
last_imp = _get_core_import(imp)
output += self._build(imp, pre_import.get(imp, pre_from_import.get(imp)))
return output
class Rebuilder():
def __init__(self, type='s', max_line_length=80, indent=" "):
self.builder_object = _get_builder_func(type, int(max_line_length), indent)
def rebuild(
self, local_imports, pre_import, pre_from_import, lines_before_any_imports,
after_imports
):
imports_by_type = _classify_imports(pre_import.keys(), local_imports)
from_imports_by_type = _classify_imports(pre_from_import.keys(), local_imports)
return self.builder_object.do_all(
imports_by_type, from_imports_by_type, lines_before_any_imports, pre_import,
pre_from_import, after_imports
)
| {
"repo_name": "bootandy/imps",
"path": "imps/rebuilders.py",
"copies": "1",
"size": "7692",
"license": "apache-2.0",
"hash": -7344063784257826000,
"line_mean": 33.1866666667,
"line_max": 110,
"alpha_frac": 0.5733229329,
"autogenerated": false,
"ratio": 3.3796133567662565,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.44529362896662567,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import re
from functools import partial
from graphviz import Digraph
from .core import istask, get_dependencies, ishashable
from .utils import funcname
def task_label(task):
"""Label for a task on a dot graph.
Examples
--------
>>> from operator import add
>>> task_label((add, 1, 2))
'add'
>>> task_label((add, (add, 1, 2), 3))
'add(...)'
"""
func = task[0]
if hasattr(func, 'funcs'):
if len(func.funcs) > 1:
return '{0}(...)'.format(funcname(func.funcs[0]))
else:
head = funcname(func.funcs[0])
else:
head = funcname(task[0])
if any(has_sub_tasks(i) for i in task[1:]):
return '{0}(...)'.format(head)
else:
return head
def has_sub_tasks(task):
"""Returns True if the task has sub tasks"""
if istask(task):
return True
elif isinstance(task, list):
return any(has_sub_tasks(i) for i in task)
else:
return False
def name(x):
try:
return str(hash(x))
except TypeError:
return str(hash(str(x)))
_HASHPAT = re.compile('([0-9a-z]{32})')
def label(x, cache=None):
"""
>>> label('x')
'x'
>>> label(('x', 1))
"('x', 1)"
>>> from hashlib import md5
>>> x = 'x-%s-hello' % md5(b'1234').hexdigest()
>>> x
'x-81dc9bdb52d04dc20036dbd8313ed055-hello'
>>> label(x)
'x-#-hello'
"""
s = str(x)
m = re.search(_HASHPAT, s)
if m is not None:
for h in m.groups():
if cache is not None:
n = cache.get(h, len(cache))
label = '#{0}'.format(n)
# cache will be overwritten destructively
cache[h] = n
else:
label = '#'
s = s.replace(h, label)
return s
def to_graphviz(dsk, data_attributes=None, function_attributes=None, **kwargs):
if data_attributes is None:
data_attributes = {}
if function_attributes is None:
function_attributes = {}
attributes = {'rankdir': 'BT'}
attributes.update(kwargs)
g = Digraph(graph_attr=attributes)
seen = set()
cache = {}
for k, v in dsk.items():
k_name = name(k)
if k_name not in seen:
seen.add(k_name)
g.node(k_name, label=label(k, cache=cache), shape='box',
**data_attributes.get(k, {}))
if istask(v):
func_name = name((k, 'function'))
if func_name not in seen:
seen.add(func_name)
g.node(func_name, label=task_label(v), shape='circle',
**function_attributes.get(k, {}))
g.edge(func_name, k_name)
for dep in get_dependencies(dsk, k):
dep_name = name(dep)
if dep_name not in seen:
seen.add(dep_name)
g.node(dep_name, label=label(dep, cache=cache), shape='box',
**data_attributes.get(dep, {}))
g.edge(dep_name, func_name)
elif ishashable(v) and v in dsk:
g.edge(name(v), k_name)
return g
IPYTHON_IMAGE_FORMATS = frozenset(['jpeg', 'png'])
IPYTHON_NO_DISPLAY_FORMATS = frozenset(['dot', 'pdf'])
def _get_display_cls(format):
"""
Get the appropriate IPython display class for `format`.
Returns `IPython.display.SVG` if format=='svg', otherwise
`IPython.display.Image`.
If IPython is not importable, return dummy function that swallows its
arguments and returns None.
"""
dummy = lambda *args, **kwargs: None
try:
import IPython.display as display
except ImportError:
# Can't return a display object if no IPython.
return dummy
if format in IPYTHON_NO_DISPLAY_FORMATS:
# IPython can't display this format natively, so just return None.
return dummy
elif format in IPYTHON_IMAGE_FORMATS:
# Partially apply `format` so that `Image` and `SVG` supply a uniform
# interface to the caller.
return partial(display.Image, format=format)
elif format == 'svg':
return display.SVG
else:
raise ValueError("Unknown format '%s' passed to `dot_graph`" % format)
def dot_graph(dsk, filename='mydask', format=None, **kwargs):
"""
Render a task graph using dot.
If `filename` is not None, write a file to disk with that name in the
format specified by `format`. `filename` should not include an extension.
Parameters
----------
dsk : dict
The graph to display.
filename : str or None, optional
The name (without an extension) of the file to write to disk. If
`filename` is None, no file will be written, and we communicate with
dot using only pipes. Default is 'mydask'.
format : {'png', 'pdf', 'dot', 'svg', 'jpeg', 'jpg'}, optional
Format in which to write output file. Default is 'png'.
**kwargs
Additional keyword arguments to forward to `to_graphviz`.
Returns
-------
result : None or IPython.display.Image or IPython.display.SVG (See below.)
Notes
-----
If IPython is installed, we return an IPython.display object in the
requested format. If IPython is not installed, we just return None.
We always return None if format is 'pdf' or 'dot', because IPython can't
display these formats natively. Passing these formats with filename=None
will not produce any useful output.
See Also
--------
dask.dot.to_graphviz
"""
g = to_graphviz(dsk, **kwargs)
fmts = ['.png', '.pdf', '.dot', '.svg', '.jpeg', '.jpg']
if format is None and any(filename.lower().endswith(fmt) for fmt in fmts):
format = filename.lower().split('.')[-1]
filename = filename.rsplit('.')[0]
if format is None:
format = 'png'
data = g.pipe(format=format)
display_cls = _get_display_cls(format)
if not filename:
return display_cls(data=data)
full_filename = '.'.join([filename, format])
with open(full_filename, 'wb') as f:
f.write(data)
return _get_display_cls(format)(filename=full_filename)
| {
"repo_name": "vikhyat/dask",
"path": "dask/dot.py",
"copies": "1",
"size": "6252",
"license": "bsd-3-clause",
"hash": 155931264962897400,
"line_mean": 27.4181818182,
"line_max": 80,
"alpha_frac": 0.5746960972,
"autogenerated": false,
"ratio": 3.7280858676207513,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9801947057481779,
"avg_score": 0.00016698146779447592,
"num_lines": 220
} |
from __future__ import absolute_import, division, print_function
import re
from functools import partial
from graphviz import Digraph
from .core import istask, get_dependencies, ishashable
def task_label(task):
"""Label for a task on a dot graph.
Examples
--------
>>> from operator import add
>>> task_label((add, 1, 2))
'add'
>>> task_label((add, (add, 1, 2), 3))
'add(...)'
"""
func = task[0]
if hasattr(func, 'funcs'):
if len(func.funcs) > 1:
return '{0}(...)'.format(funcname(func.funcs[0]))
else:
head = funcname(func.funcs[0])
else:
head = funcname(task[0])
if any(has_sub_tasks(i) for i in task[1:]):
return '{0}(...)'.format(head)
else:
return head
def has_sub_tasks(task):
"""Returns True if the task has sub tasks"""
if istask(task):
return True
elif isinstance(task, list):
return any(has_sub_tasks(i) for i in task)
else:
return False
def funcname(func):
"""Get the name of a function."""
while hasattr(func, 'func'):
func = func.func
return func.__name__
def name(x):
try:
return str(hash(x))
except TypeError:
return str(hash(str(x)))
_HASHPAT = re.compile('([0-9a-z]{32})')
def label(x, cache=None):
"""
>>> label('x')
'x'
>>> label(('x', 1))
"('x', 1)"
>>> from hashlib import md5
>>> x = 'x-%s-hello' % md5(b'1234').hexdigest()
>>> x
'x-81dc9bdb52d04dc20036dbd8313ed055-hello'
>>> label(x)
'x-#-hello'
"""
s = str(x)
m = re.search(_HASHPAT, s)
if m is not None:
for h in m.groups():
if cache is not None:
n = cache.get(h, len(cache))
label = '#{0}'.format(n)
# cache will be overwritten destructively
cache[h] = n
else:
label = '#'
s = s.replace(h, label)
return s
def to_graphviz(dsk, data_attributes=None, function_attributes=None):
if data_attributes is None:
data_attributes = {}
if function_attributes is None:
function_attributes = {}
g = Digraph(graph_attr={'rankdir': 'BT'})
seen = set()
cache = {}
for k, v in dsk.items():
k_name = name(k)
if k_name not in seen:
seen.add(k_name)
g.node(k_name, label=label(k, cache=cache), shape='box',
**data_attributes.get(k, {}))
if istask(v):
func_name = name((k, 'function'))
if func_name not in seen:
seen.add(func_name)
g.node(func_name, label=task_label(v), shape='circle',
**function_attributes.get(k, {}))
g.edge(func_name, k_name)
for dep in get_dependencies(dsk, k):
dep_name = name(dep)
if dep_name not in seen:
seen.add(dep_name)
g.node(dep_name, label=label(dep, cache=cache), shape='box',
**data_attributes.get(dep, {}))
g.edge(dep_name, func_name)
elif ishashable(v) and v in dsk:
g.edge(name(v), k_name)
return g
IPYTHON_IMAGE_FORMATS = frozenset(['jpeg', 'png'])
IPYTHON_NO_DISPLAY_FORMATS = frozenset(['dot', 'pdf'])
def _get_display_cls(format):
"""
Get the appropriate IPython display class for `format`.
Returns `IPython.display.SVG` if format=='svg', otherwise
`IPython.display.Image`.
    If IPython is not importable, return a dummy function that swallows its
arguments and returns None.
"""
dummy = lambda *args, **kwargs: None
try:
import IPython.display as display
except ImportError:
# Can't return a display object if no IPython.
return dummy
if format in IPYTHON_NO_DISPLAY_FORMATS:
# IPython can't display this format natively, so just return None.
return dummy
elif format in IPYTHON_IMAGE_FORMATS:
# Partially apply `format` so that `Image` and `SVG` supply a uniform
# interface to the caller.
return partial(display.Image, format=format)
elif format == 'svg':
return display.SVG
else:
raise ValueError("Unknown format '%s' passed to `dot_graph`" % format)
def dot_graph(dsk, filename='mydask', format='png', **kwargs):
"""
Render a task graph using dot.
If `filename` is not None, write a file to disk with that name in the
format specified by `format`. `filename` should not include an extension.
Parameters
----------
dsk : dict
The graph to display.
filename : str or None, optional
The name (without an extension) of the file to write to disk. If
`filename` is None, no file will be written, and we communicate with
dot using only pipes. Default is 'mydask'.
format : {'png', 'pdf', 'dot', 'svg', 'jpeg', 'jpg'}, optional
Format in which to write output file. Default is 'png'.
**kwargs
Additional keyword arguments to forward to `to_graphviz`.
Returns
-------
result : None or IPython.display.Image or IPython.display.SVG (See below.)
Notes
-----
If IPython is installed, we return an IPython.display object in the
requested format. If IPython is not installed, we just return None.
We always return None if format is 'pdf' or 'dot', because IPython can't
display these formats natively. Passing these formats with filename=None
will not produce any useful output.
See Also
--------
dask.dot.to_graphviz
"""
g = to_graphviz(dsk, **kwargs)
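    # Render the graph in memory with graphviz's pipe(); the resulting bytes
    # are either wrapped in an IPython display object or written out to
    # '<filename>.<format>' below.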
data = g.pipe(format=format)
display_cls = _get_display_cls(format)
if not filename:
return display_cls(data=data)
full_filename = '.'.join([filename, format])
with open(full_filename, 'wb') as f:
f.write(data)
return _get_display_cls(format)(filename=full_filename)
| {
"repo_name": "clarkfitzg/dask",
"path": "dask/dot.py",
"copies": "1",
"size": "6019",
"license": "bsd-3-clause",
"hash": -1726018515621820000,
"line_mean": 26.9953488372,
"line_max": 80,
"alpha_frac": 0.5736833361,
"autogenerated": false,
"ratio": 3.724628712871287,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47983120489712866,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import re
from functools import partial
from tornado import httputil
from tornado.httpserver import _CallableAdapter
from tornado.escape import url_escape, url_unescape, utf8
from tornado.log import app_log
from tornado.util import basestring_type, import_object, re_unescape, unicode_type
try:
import typing # noqa
except ImportError:
pass
class Router(httputil.HTTPServerConnectionDelegate):
"""Abstract router interface."""
def find_handler(self, request, **kwargs):
# type: (httputil.HTTPServerRequest, typing.Any)->httputil.HTTPMessageDelegate
"""Must be implemented to return an appropriate instance of `~.httputil.HTTPMessageDelegate`
that can serve the request.
Routing implementations may pass additional kwargs to extend the routing logic.
:arg httputil.HTTPServerRequest request: current HTTP request.
:arg kwargs: additional keyword arguments passed by routing implementation.
:returns: an instance of `~.httputil.HTTPMessageDelegate` that will be used to
process the request.
"""
raise NotImplementedError()
def start_request(self, server_conn, request_conn):
return _RoutingDelegate(self, server_conn, request_conn)
class ReversibleRouter(Router):
"""Abstract router interface for routers that can handle named routes
and support reversing them to original urls.
"""
def reverse_url(self, name, *args):
"""Returns url string for a given route name and arguments
or ``None`` if no match is found.
:arg str name: route name.
:arg args: url parameters.
:returns: parametrized url string for a given route name (or ``None``).
"""
raise NotImplementedError()
class _RoutingDelegate(httputil.HTTPMessageDelegate):
def __init__(self, router, server_conn, request_conn):
self.server_conn = server_conn
self.request_conn = request_conn
self.delegate = None
self.router = router # type: Router
def headers_received(self, start_line, headers):
request = httputil.HTTPServerRequest(
connection=self.request_conn,
server_connection=self.server_conn,
start_line=start_line, headers=headers)
self.delegate = self.router.find_handler(request)
return self.delegate.headers_received(start_line, headers)
def data_received(self, chunk):
return self.delegate.data_received(chunk)
def finish(self):
self.delegate.finish()
def on_connection_close(self):
self.delegate.on_connection_close()
class RuleRouter(Router):
"""Rule-based router implementation."""
def __init__(self, rules=None):
"""Constructs a router from an ordered list of rules::
RuleRouter([
Rule(PathMatches("/handler"), Target),
# ... more rules
])
        You can also omit an explicit `Rule` constructor and use tuples of arguments::
RuleRouter([
(PathMatches("/handler"), Target),
])
`PathMatches` is a default matcher, so the example above can be simplified::
RuleRouter([
("/handler", Target),
])
In the examples above, ``Target`` can be a nested `Router` instance, an instance of
`~.httputil.HTTPServerConnectionDelegate` or an old-style callable, accepting a request argument.
:arg rules: a list of `Rule` instances or tuples of `Rule`
constructor arguments.
"""
self.rules = [] # type: typing.List[Rule]
if rules:
self.add_rules(rules)
def add_rules(self, rules):
"""Appends new rules to the router.
:arg rules: a list of Rule instances (or tuples of arguments, which are
passed to Rule constructor).
"""
for rule in rules:
if isinstance(rule, (tuple, list)):
assert len(rule) in (2, 3, 4)
if isinstance(rule[0], basestring_type):
rule = Rule(PathMatches(rule[0]), *rule[1:])
else:
rule = Rule(*rule)
self.rules.append(self.process_rule(rule))
def process_rule(self, rule):
"""Override this method for additional preprocessing of each rule.
:arg Rule rule: a rule to be processed.
:returns: the same or modified Rule instance.
"""
return rule
def find_handler(self, request, **kwargs):
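        # Rules are tried in insertion order; the first matcher that returns
        # a non-None params dict wins, and its target is resolved into an
        # HTTPMessageDelegate by get_target_delegate().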
for rule in self.rules:
target_params = rule.matcher.match(request)
if target_params is not None:
if rule.target_kwargs:
target_params['target_kwargs'] = rule.target_kwargs
delegate = self.get_target_delegate(
rule.target, request, **target_params)
if delegate is not None:
return delegate
return None
def get_target_delegate(self, target, request, **target_params):
"""Returns an instance of `~.httputil.HTTPMessageDelegate` for a
Rule's target. This method is called by `~.find_handler` and can be
extended to provide additional target types.
:arg target: a Rule's target.
:arg httputil.HTTPServerRequest request: current request.
:arg target_params: additional parameters that can be useful
for `~.httputil.HTTPMessageDelegate` creation.
"""
if isinstance(target, Router):
return target.find_handler(request, **target_params)
elif isinstance(target, httputil.HTTPServerConnectionDelegate):
return target.start_request(request.server_connection, request.connection)
elif callable(target):
return _CallableAdapter(
partial(target, **target_params), request.connection
)
return None
class ReversibleRuleRouter(ReversibleRouter, RuleRouter):
"""A rule-based router that implements ``reverse_url`` method.
Each rule added to this router may have a ``name`` attribute that can be
used to reconstruct an original uri. The actual reconstruction takes place
in a rule's matcher (see `Matcher.reverse`).
"""
def __init__(self, rules=None):
self.named_rules = {} # type: typing.Dict[str]
super(ReversibleRuleRouter, self).__init__(rules)
def process_rule(self, rule):
rule = super(ReversibleRuleRouter, self).process_rule(rule)
if rule.name:
if rule.name in self.named_rules:
app_log.warning(
"Multiple handlers named %s; replacing previous value",
rule.name)
self.named_rules[rule.name] = rule
return rule
def reverse_url(self, name, *args):
if name in self.named_rules:
return self.named_rules[name].matcher.reverse(*args)
for rule in self.rules:
if isinstance(rule.target, ReversibleRouter):
reversed_url = rule.target.reverse_url(name, *args)
if reversed_url is not None:
return reversed_url
return None
class Rule(object):
"""A routing rule."""
def __init__(self, matcher, target, target_kwargs=None, name=None):
"""Constructs a Rule instance.
:arg Matcher matcher: a `Matcher` instance used for determining
whether the rule should be considered a match for a specific
request.
:arg target: a Rule's target (typically a ``RequestHandler`` or
`~.httputil.HTTPServerConnectionDelegate` subclass or even a nested `Router`,
depending on routing implementation).
:arg dict target_kwargs: a dict of parameters that can be useful
at the moment of target instantiation (for example, ``status_code``
for a ``RequestHandler`` subclass). They end up in
``target_params['target_kwargs']`` of `RuleRouter.get_target_delegate`
method.
:arg str name: the name of the rule that can be used to find it
in `ReversibleRouter.reverse_url` implementation.
"""
if isinstance(target, str):
# import the Module and instantiate the class
# Must be a fully qualified name (module.ClassName)
target = import_object(target)
self.matcher = matcher # type: Matcher
self.target = target
self.target_kwargs = target_kwargs if target_kwargs else {}
self.name = name
def reverse(self, *args):
return self.matcher.reverse(*args)
def __repr__(self):
return '%s(%r, %s, kwargs=%r, name=%r)' % \
(self.__class__.__name__, self.matcher,
self.target, self.target_kwargs, self.name)
class Matcher(object):
"""Represents a matcher for request features."""
def match(self, request):
"""Matches current instance against the request.
:arg httputil.HTTPServerRequest request: current HTTP request
:returns: a dict of parameters to be passed to the target handler
(for example, ``handler_kwargs``, ``path_args``, ``path_kwargs``
can be passed for proper `~.web.RequestHandler` instantiation).
An empty dict is a valid (and common) return value to indicate a match
when the argument-passing features are not used.
``None`` must be returned to indicate that there is no match."""
raise NotImplementedError()
def reverse(self, *args):
"""Reconstructs full url from matcher instance and additional arguments."""
return None
class AnyMatches(Matcher):
"""Matches any request."""
def match(self, request):
return {}
class HostMatches(Matcher):
"""Matches requests from hosts specified by ``host_pattern`` regex."""
def __init__(self, host_pattern):
if isinstance(host_pattern, basestring_type):
if not host_pattern.endswith("$"):
host_pattern += "$"
self.host_pattern = re.compile(host_pattern)
else:
self.host_pattern = host_pattern
def match(self, request):
if self.host_pattern.match(request.host_name):
return {}
return None
class DefaultHostMatches(Matcher):
"""Matches requests from host that is equal to application's default_host.
Always returns no match if ``X-Real-Ip`` header is present.
"""
def __init__(self, application, host_pattern):
self.application = application
self.host_pattern = host_pattern
def match(self, request):
# Look for default host if not behind load balancer (for debugging)
if "X-Real-Ip" not in request.headers:
if self.host_pattern.match(self.application.default_host):
return {}
return None
class PathMatches(Matcher):
"""Matches requests with paths specified by ``path_pattern`` regex."""
def __init__(self, path_pattern):
if isinstance(path_pattern, basestring_type):
if not path_pattern.endswith('$'):
path_pattern += '$'
self.regex = re.compile(path_pattern)
else:
self.regex = path_pattern
assert len(self.regex.groupindex) in (0, self.regex.groups), \
("groups in url regexes must either be all named or all "
"positional: %r" % self.regex.pattern)
self._path, self._group_count = self._find_groups()
def match(self, request):
match = self.regex.match(request.path)
if match is None:
return None
if not self.regex.groups:
return {}
path_args, path_kwargs = [], {}
# Pass matched groups to the handler. Since
# match.groups() includes both named and
# unnamed groups, we want to use either groups
# or groupdict but not both.
if self.regex.groupindex:
path_kwargs = dict(
(str(k), _unquote_or_none(v))
for (k, v) in match.groupdict().items())
else:
path_args = [_unquote_or_none(s) for s in match.groups()]
return dict(path_args=path_args, path_kwargs=path_kwargs)
def reverse(self, *args):
if self._path is None:
raise ValueError("Cannot reverse url regex " + self.regex.pattern)
assert len(args) == self._group_count, "required number of arguments " \
"not found"
if not len(args):
return self._path
converted_args = []
for a in args:
if not isinstance(a, (unicode_type, bytes)):
a = str(a)
converted_args.append(url_escape(utf8(a), plus=False))
return self._path % tuple(converted_args)
def _find_groups(self):
"""Returns a tuple (reverse string, group count) for a url.
For example: Given the url pattern /([0-9]{4})/([a-z-]+)/, this method
would return ('/%s/%s/', 2).
"""
pattern = self.regex.pattern
if pattern.startswith('^'):
pattern = pattern[1:]
if pattern.endswith('$'):
pattern = pattern[:-1]
if self.regex.groups != pattern.count('('):
# The pattern is too complicated for our simplistic matching,
# so we can't support reversing it.
return None, None
pieces = []
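        # Rebuild the path as a %-format template: text outside capturing
        # groups is kept literally (after un-escaping), and each capturing
        # group collapses to a single '%s'.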
for fragment in pattern.split('('):
if ')' in fragment:
paren_loc = fragment.index(')')
if paren_loc >= 0:
pieces.append('%s' + fragment[paren_loc + 1:])
else:
try:
unescaped_fragment = re_unescape(fragment)
except ValueError as exc:
# If we can't unescape part of it, we can't
# reverse this url.
return (None, None)
pieces.append(unescaped_fragment)
return ''.join(pieces), self.regex.groups
class URLSpec(Rule):
"""Specifies mappings between URLs and handlers.
.. versionchanged: 4.5
`URLSpec` is now a subclass of a `Rule` with `PathMatches` matcher and is preserved for
backwards compatibility.
"""
def __init__(self, pattern, handler, kwargs=None, name=None):
"""Parameters:
* ``pattern``: Regular expression to be matched. Any capturing
groups in the regex will be passed in to the handler's
get/post/etc methods as arguments (by keyword if named, by
            position if unnamed. Named and unnamed capturing groups may
            not be mixed in the same rule).
* ``handler``: `~.web.RequestHandler` subclass to be invoked.
* ``kwargs`` (optional): A dictionary of additional arguments
to be passed to the handler's constructor.
* ``name`` (optional): A name for this handler. Used by
`~.web.Application.reverse_url`.
"""
super(URLSpec, self).__init__(PathMatches(pattern), handler, kwargs, name)
self.regex = self.matcher.regex
self.handler_class = self.target
self.kwargs = kwargs
def __repr__(self):
return '%s(%r, %s, kwargs=%r, name=%r)' % \
(self.__class__.__name__, self.regex.pattern,
self.handler_class, self.kwargs, self.name)
def _unquote_or_none(s):
"""None-safe wrapper around url_unescape to handle unmatched optional
groups correctly.
Note that args are passed as bytes so the handler can decide what
encoding to use.
"""
if s is None:
return s
return url_unescape(s, encoding=None, plus=False)
| {
"repo_name": "baixuexue123/pyagent",
"path": "tornado_routing.py",
"copies": "1",
"size": "15904",
"license": "mit",
"hash": -7525367047206485000,
"line_mean": 34.5,
"line_max": 105,
"alpha_frac": 0.6038103622,
"autogenerated": false,
"ratio": 4.457399103139013,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5561209465339013,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import re
from html.parser import HTMLParser
import requests
package_regexp = "(.+?)/(.+)"
class MetaHTMLParser(HTMLParser):
def __init__(self, variables):
self.meta = {}
self.variables = variables
HTMLParser.__init__(self)
def replace_values(self, s):
for k, v in self.variables.items():
s = s.replace("{%s}" % k, v)
return s
def handle_starttag(self, tag, attrs):
if tag == "meta":
d = dict(attrs)
if 'name' in d and d['name'] == 'appr-package':
name, source = d['content'].split(" ")
name = self.replace_values(name)
source = self.replace_values(source)
if name not in self.meta:
self.meta[name] = []
self.meta[name].append(source)
def split_package_name(package):
m = re.search(package_regexp, package)
host, name = (m.group(1), m.group(2))
return (host, name)
def ishosted(package):
host, _ = split_package_name(package)
if "." in host or 'localhost' in host:
return True
else:
return False
def discover_sources(package, version, media_type, secure=False):
schemes = ["https://", "http://"]
host, name = split_package_name(package)
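    # Try https first and fall back to plain http when the https request
    # fails and `secure` is False; the returned page is parsed for
    # appr-package <meta> tags mapping this package to its source locations.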
for scheme in schemes:
url = scheme + host
try:
r = requests.get(url, params={"appr-discovery": 1}, timeout=2)
except (requests.exceptions.Timeout, requests.ConnectionError) as e:
if scheme == "https://" and not secure:
continue
else:
raise e
r.raise_for_status()
variables = {
'name': name,
'version': version,
"media_type": media_type,
"mediatype": media_type}
p = MetaHTMLParser(variables)
p.feed(r.content.decode())
if package in p.meta:
return p.meta[package]
return None
| {
"repo_name": "cn-app-registry/cnr-server",
"path": "appr/discovery.py",
"copies": "2",
"size": "2037",
"license": "apache-2.0",
"hash": -6581868122980772000,
"line_mean": 27.6901408451,
"line_max": 76,
"alpha_frac": 0.5483554246,
"autogenerated": false,
"ratio": 3.9707602339181287,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 71
} |
from __future__ import absolute_import, division, print_function
import re
from itertools import count
from operator import getitem
from .compatibility import unicode, zip_longest
from .core import add, inc # noqa: F401
from .core import (istask, get_dependencies, subs, toposort, flatten,
reverse_dict, ishashable, preorder_traversal)
from .rewrite import END
def cull(dsk, keys):
""" Return new dask with only the tasks required to calculate keys.
In other words, remove unnecessary tasks from dask.
``keys`` may be a single key or list of keys.
Examples
--------
>>> d = {'x': 1, 'y': (inc, 'x'), 'out': (add, 'x', 10)}
>>> dsk, dependencies = cull(d, 'out') # doctest: +SKIP
>>> dsk # doctest: +SKIP
{'x': 1, 'out': (add, 'x', 10)}
>>> dependencies # doctest: +SKIP
{'x': set(), 'out': set(['x'])}
Returns
-------
dsk: culled dask graph
dependencies: Dict mapping {key: [deps]}. Useful side effect to accelerate
other optimizations, notably fuse.
"""
if not isinstance(keys, (list, set)):
keys = [keys]
out_keys = []
seen = set()
dependencies = dict()
work = list(set(flatten(keys)))
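    # Breadth-first walk out from the requested keys, collecting every key
    # that is (transitively) required and recording its dependency list.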
while work:
new_work = []
out_keys += work
deps = [(k, get_dependencies(dsk, k, as_list=True)) # fuse needs lists
for k in work]
dependencies.update(deps)
for _, deplist in deps:
for d in deplist:
if d not in seen:
seen.add(d)
new_work.append(d)
work = new_work
out = {k: dsk[k] for k in out_keys}
return out, dependencies
def default_fused_keys_renamer(keys):
"""Create new keys for fused tasks"""
typ = type(keys[0])
if typ is str or typ is unicode:
names = [key_split(x) for x in keys[:0:-1]]
names.append(keys[0])
return '-'.join(names)
elif (typ is tuple and len(keys[0]) > 0 and
isinstance(keys[0][0], (str, unicode))):
names = [key_split(x) for x in keys[:0:-1]]
names.append(keys[0][0])
return ('-'.join(names),) + keys[0][1:]
else:
return None
def fuse(dsk, keys=None, dependencies=None, rename_fused_keys=True):
""" Return new dask graph with linear sequence of tasks fused together.
If specified, the keys in ``keys`` keyword argument are *not* fused.
Supply ``dependencies`` from output of ``cull`` if available to avoid
recomputing dependencies.
Parameters
----------
dsk: dict
keys: list
dependencies: dict, optional
{key: [list-of-keys]}. Must be a list to provide count of each key
This optional input often comes from ``cull``
rename_fused_keys: bool or func, optional
Whether to rename the fused keys with ``default_fused_keys_renamer``
or not. Renaming fused keys can keep the graph more understandable
and comprehensive, but it comes at the cost of additional processing.
If False, then the top-most key will be used. For advanced usage, a
func is also accepted, ``new_key = rename_fused_keys(fused_key_list)``.
Examples
--------
>>> d = {'a': 1, 'b': (inc, 'a'), 'c': (inc, 'b')}
>>> dsk, dependencies = fuse(d)
>>> dsk # doctest: +SKIP
{'a-b-c': (inc, (inc, 1)), 'c': 'a-b-c'}
>>> dsk, dependencies = fuse(d, rename_fused_keys=False)
>>> dsk # doctest: +SKIP
{'c': (inc, (inc, 1))}
>>> dsk, dependencies = fuse(d, keys=['b'], rename_fused_keys=False)
>>> dsk # doctest: +SKIP
{'b': (inc, 1), 'c': (inc, 'b')}
Returns
-------
dsk: output graph with keys fused
dependencies: dict mapping dependencies after fusion. Useful side effect
to accelerate other downstream optimizations.
"""
if keys is not None and not isinstance(keys, set):
if not isinstance(keys, list):
keys = [keys]
keys = set(flatten(keys))
if dependencies is None:
dependencies = {k: get_dependencies(dsk, k, as_list=True)
for k in dsk}
# locate all members of linear chains
child2parent = {}
unfusible = set()
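    # Record child -> parent links only where a linear chain is possible: a
    # child is marked unfusible if it is one of the protected ``keys``, if
    # more than one task depends on it, or if its dependent also depends on
    # other keys.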
for parent in dsk:
deps = dependencies[parent]
has_many_children = len(deps) > 1
for child in deps:
if keys is not None and child in keys:
unfusible.add(child)
elif child in child2parent:
del child2parent[child]
unfusible.add(child)
elif has_many_children:
unfusible.add(child)
elif child not in unfusible:
child2parent[child] = parent
# construct the chains from ancestor to descendant
chains = []
parent2child = dict(map(reversed, child2parent.items()))
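    # Grow each chain in both directions from an arbitrary link: follow
    # parents upward, reverse so the ancestor comes first, then follow
    # children downward, yielding each maximal linear run as a single
    # ancestor -> descendant list.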
while child2parent:
child, parent = child2parent.popitem()
chain = [child, parent]
while parent in child2parent:
parent = child2parent.pop(parent)
del parent2child[parent]
chain.append(parent)
chain.reverse()
while child in parent2child:
child = parent2child.pop(child)
del child2parent[child]
chain.append(child)
chains.append(chain)
dependencies = {k: set(v) for k, v in dependencies.items()}
if rename_fused_keys is True:
key_renamer = default_fused_keys_renamer
elif rename_fused_keys is False:
key_renamer = None
else:
key_renamer = rename_fused_keys
# create a new dask with fused chains
rv = {}
fused = set()
aliases = set()
is_renamed = False
for chain in chains:
if key_renamer is not None:
new_key = key_renamer(chain)
is_renamed = (new_key is not None and new_key not in dsk and
new_key not in rv)
child = chain.pop()
val = dsk[child]
while chain:
parent = chain.pop()
dependencies[parent].update(dependencies.pop(child))
dependencies[parent].remove(child)
val = subs(dsk[parent], child, val)
fused.add(child)
child = parent
fused.add(child)
if is_renamed:
rv[new_key] = val
rv[child] = new_key
dependencies[new_key] = dependencies[child]
dependencies[child] = {new_key}
aliases.add(child)
else:
rv[child] = val
for key, val in dsk.items():
if key not in fused:
rv[key] = val
if aliases:
for key, deps in dependencies.items():
for old_key in deps & aliases:
new_key = rv[old_key]
deps.remove(old_key)
deps.add(new_key)
rv[key] = subs(rv[key], old_key, new_key)
if keys is not None:
for key in aliases - keys:
del rv[key]
del dependencies[key]
return rv, dependencies
def _flat_set(x):
if x is None:
return set()
elif isinstance(x, set):
return x
elif not isinstance(x, (list, set)):
x = [x]
return set(x)
def inline(dsk, keys=None, inline_constants=True, dependencies=None):
""" Return new dask with the given keys inlined with their values.
Inlines all constants if ``inline_constants`` keyword is True. Note that
the constant keys will remain in the graph, to remove them follow
``inline`` with ``cull``.
Examples
--------
>>> d = {'x': 1, 'y': (inc, 'x'), 'z': (add, 'x', 'y')}
>>> inline(d) # doctest: +SKIP
{'x': 1, 'y': (inc, 1), 'z': (add, 1, 'y')}
>>> inline(d, keys='y') # doctest: +SKIP
{'x': 1, 'y': (inc, 1), 'z': (add, 1, (inc, 1))}
>>> inline(d, keys='y', inline_constants=False) # doctest: +SKIP
{'x': 1, 'y': (inc, 1), 'z': (add, 'x', (inc, 'x'))}
"""
if dependencies and isinstance(next(iter(dependencies.values())), list):
dependencies = {k: set(v) for k, v in dependencies.items()}
keys = _flat_set(keys)
if dependencies is None:
dependencies = {k: get_dependencies(dsk, k)
for k in dsk}
if inline_constants:
keys.update(k for k, v in dsk.items() if
(ishashable(v) and v in dsk) or
(not dependencies[k] and not istask(v)))
# Keys may depend on other keys, so determine replace order with toposort.
# The values stored in `keysubs` do not include other keys.
replaceorder = toposort(dict((k, dsk[k]) for k in keys if k in dsk),
dependencies=dependencies)
keysubs = {}
for key in replaceorder:
val = dsk[key]
for dep in keys & dependencies[key]:
if dep in keysubs:
replace = keysubs[dep]
else:
replace = dsk[dep]
val = subs(val, dep, replace)
keysubs[key] = val
# Make new dask with substitutions
dsk2 = keysubs.copy()
for key, val in dsk.items():
if key not in dsk2:
for item in keys & dependencies[key]:
val = subs(val, item, keysubs[item])
dsk2[key] = val
return dsk2
def inline_functions(dsk, output, fast_functions=None, inline_constants=False,
dependencies=None):
""" Inline cheap functions into larger operations
Examples
--------
>>> dsk = {'out': (add, 'i', 'd'), # doctest: +SKIP
... 'i': (inc, 'x'),
... 'd': (double, 'y'),
... 'x': 1, 'y': 1}
>>> inline_functions(dsk, [], [inc]) # doctest: +SKIP
{'out': (add, (inc, 'x'), 'd'),
'd': (double, 'y'),
'x': 1, 'y': 1}
Protect output keys. In the example below ``i`` is not inlined because it
is marked as an output key.
>>> inline_functions(dsk, ['i', 'out'], [inc, double]) # doctest: +SKIP
{'out': (add, 'i', (double, 'y')),
'i': (inc, 'x'),
'x': 1, 'y': 1}
"""
if not fast_functions:
return dsk
output = set(output)
fast_functions = set(fast_functions)
if dependencies is None:
dependencies = {k: get_dependencies(dsk, k)
for k in dsk}
dependents = reverse_dict(dependencies)
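    # Only inline tasks that are built purely from the "fast" functions,
    # have at least one dependent, and are not requested outputs.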
keys = [k for k, v in dsk.items()
if istask(v) and functions_of(v).issubset(fast_functions) and
dependents[k] and k not in output
]
if keys:
dsk = inline(dsk, keys, inline_constants=inline_constants,
dependencies=dependencies)
for k in keys:
del dsk[k]
return dsk
def unwrap_partial(func):
while hasattr(func, 'func'):
func = func.func
return func
def functions_of(task):
""" Set of functions contained within nested task
Examples
--------
>>> task = (add, (mul, 1, 2), (inc, 3)) # doctest: +SKIP
>>> functions_of(task) # doctest: +SKIP
set([add, mul, inc])
"""
funcs = set()
work = [task]
sequence_types = {list, tuple}
while work:
new_work = []
for task in work:
if type(task) in sequence_types:
if istask(task):
funcs.add(unwrap_partial(task[0]))
new_work += task[1:]
else:
new_work += task
work = new_work
return funcs
def dealias(dsk, keys=None, dependencies=None):
""" Remove aliases from dask
Removes and renames aliases using ``inline``. Optional ``keys`` keyword
argument protects keys from being deleted. This is useful to protect keys
that would be expected by a scheduler. If not provided, all inlined aliases
are removed.
Examples
--------
>>> dsk = {'a': (range, 5),
... 'b': 'a',
... 'c': 'b',
... 'd': (sum, 'c'),
... 'e': 'd',
... 'f': (inc, 'd')}
>>> dealias(dsk) # doctest: +SKIP
{'a': (range, 5),
'd': (sum, 'a'),
'f': (inc, 'd')}
>>> dsk = {'a': (range, 5),
... 'b': 'a'}
>>> dealias(dsk) # doctest: +SKIP
{'a': (range, 5)}
>>> dealias(dsk, keys=['a', 'b']) # doctest: +SKIP
{'a': (range, 5),
'b': 'a'}
"""
keys = keys or set()
if not isinstance(keys, set):
keys = set(keys)
if not dependencies:
dependencies = {k: get_dependencies(dsk, k)
for k in dsk}
aliases = set(k for k, task in dsk.items() if
ishashable(task) and task in dsk)
dsk2 = inline(dsk, aliases, inline_constants=False)
for k in aliases.difference(keys):
del dsk2[k]
return dsk2
def equivalent(term1, term2, subs=None):
"""Determine if two terms are equivalent, modulo variable substitution.
Equivalent to applying substitutions in `subs` to `term2`, then checking if
`term1 == term2`.
If a subterm doesn't support comparison (i.e. `term1 == term2` errors),
returns `False`.
Parameters
----------
term1, term2 : terms
subs : dict, optional
Mapping of substitutions from `term2` to `term1`
Examples
--------
>>> from operator import add
>>> term1 = (add, 'a', 'b')
>>> term2 = (add, 'x', 'y')
>>> subs = {'x': 'a', 'y': 'b'}
>>> equivalent(term1, term2, subs)
True
>>> subs = {'x': 'a'}
>>> equivalent(term1, term2, subs)
False
"""
# Quick escape for special cases
head_type = type(term1)
if type(term2) != head_type:
# If terms aren't same type, fail
return False
elif head_type not in (tuple, list):
# For literals, just compare
try:
# `is` is tried first, to allow objects that don't implement `==`
# to work for cases where term1 is term2. If `is` returns False,
# and `==` errors, then the only thing we can do is return False.
return term1 is term2 or term1 == term2
        except Exception:
return False
pot1 = preorder_traversal(term1)
pot2 = preorder_traversal(term2)
subs = {} if subs is None else subs
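    # Walk both terms in lock-step preorder; each token from term2 is mapped
    # through `subs` before being compared with the corresponding token from
    # term1. A length mismatch or failed comparison means "not equivalent".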
for t1, t2 in zip_longest(pot1, pot2, fillvalue=END):
if t1 is END or t2 is END:
# If terms aren't same length: fail
return False
elif ishashable(t2) and t2 in subs:
val = subs[t2]
else:
val = t2
try:
if t1 is not t2 and t1 != val:
return False
        except Exception:
return False
return True
def dependency_dict(dsk):
"""Create a dict matching ordered dependencies to keys.
Examples
--------
>>> from operator import add
>>> dsk = {'a': 1, 'b': 2, 'c': (add, 'a', 'a'), 'd': (add, 'b', 'a')}
>>> dependency_dict(dsk) # doctest: +SKIP
{(): ['a', 'b'], ('a', 'a'): ['c'], ('b', 'a'): ['d']}
"""
dep_dict = {}
for key in dsk:
deps = tuple(get_dependencies(dsk, key, True))
dep_dict.setdefault(deps, []).append(key)
return dep_dict
def _possible_matches(dep_dict, deps, subs):
deps2 = []
for d in deps:
v = subs.get(d, None)
if v is not None:
deps2.append(v)
else:
return []
deps2 = tuple(deps2)
return dep_dict.get(deps2, [])
def _sync_keys(dsk1, dsk2, dsk2_topo):
dep_dict1 = dependency_dict(dsk1)
subs = {}
for key2 in toposort(dsk2):
deps = tuple(get_dependencies(dsk2, key2, True))
# List of keys in dsk1 that have terms that *may* match key2
possible_matches = _possible_matches(dep_dict1, deps, subs)
if possible_matches:
val2 = dsk2[key2]
for key1 in possible_matches:
val1 = dsk1[key1]
if equivalent(val1, val2, subs):
subs[key2] = key1
break
return subs
def sync_keys(dsk1, dsk2):
"""Return a dict matching keys in `dsk2` to equivalent keys in `dsk1`.
Parameters
----------
dsk1, dsk2 : dict
Examples
--------
>>> from operator import add, mul
>>> dsk1 = {'a': 1, 'b': (add, 'a', 10), 'c': (mul, 'b', 5)}
>>> dsk2 = {'x': 1, 'y': (add, 'x', 10), 'z': (mul, 'y', 2)}
>>> sync_keys(dsk1, dsk2) # doctest: +SKIP
{'x': 'a', 'y': 'b'}
"""
return _sync_keys(dsk1, dsk2, toposort(dsk2))
def merge_sync(dsk1, dsk2):
"""Merge two dasks together, combining equivalent tasks.
If a task in `dsk2` exists in `dsk1`, the task and key from `dsk1` is used.
If a task in `dsk2` has the same key as a task in `dsk1` (and they aren't
equivalent tasks), then a new key is created for the task in `dsk2`. This
prevents name conflicts.
Parameters
----------
dsk1, dsk2 : dict
Variable names in `dsk2` are replaced with equivalent ones in `dsk1`
before merging.
Returns
-------
new_dsk : dict
The merged dask.
key_map : dict
A mapping between the keys from `dsk2` to their new names in `new_dsk`.
Examples
--------
>>> from operator import add, mul
>>> dsk1 = {'a': 1, 'b': (add, 'a', 10), 'c': (mul, 'b', 5)}
>>> dsk2 = {'x': 1, 'y': (add, 'x', 10), 'z': (mul, 'y', 2)}
>>> new_dsk, key_map = merge_sync(dsk1, dsk2)
>>> new_dsk # doctest: +SKIP
{'a': 1, 'b': (add, 'a', 10), 'c': (mul, 'b', 5), 'z': (mul, 'b', 2)}
>>> key_map # doctest: +SKIP
{'x': 'a', 'y': 'b', 'z': 'z'}
Conflicting names are replaced with auto-generated names upon merging.
>>> dsk1 = {'a': 1, 'res': (add, 'a', 1)}
>>> dsk2 = {'x': 1, 'res': (add, 'x', 2)}
>>> new_dsk, key_map = merge_sync(dsk1, dsk2)
>>> new_dsk # doctest: +SKIP
{'a': 1, 'res': (add, 'a', 1), 'merge_1': (add, 'a', 2)}
>>> key_map # doctest: +SKIP
{'x': 'a', 'res': 'merge_1'}
"""
dsk2_topo = toposort(dsk2)
sd = _sync_keys(dsk1, dsk2, dsk2_topo)
new_dsk = dsk1.copy()
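    # Process dsk2 in topological order: keys with an equivalent task in
    # dsk1 are remapped onto the dsk1 key, keys that clash with a different
    # dsk1 task get a fresh 'merge_<n>' name, and all other keys are kept.
    # Substitutions are applied to each task before it is stored.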
for key in dsk2_topo:
if key in sd:
new_key = sd[key]
else:
if key in dsk1:
new_key = next(merge_sync.names)
else:
new_key = key
sd[key] = new_key
task = dsk2[key]
for a, b in sd.items():
task = subs(task, a, b)
new_dsk[new_key] = task
return new_dsk, sd
# store the name iterator in the function
merge_sync.names = ('merge_%d' % i for i in count(1))
def fuse_selections(dsk, head1, head2, merge):
"""Fuse selections with lower operation.
Handles graphs of the form:
``{key1: (head1, key2, ...), key2: (head2, ...)}``
Parameters
----------
dsk : dict
dask graph
head1 : function
The first element of task1
head2 : function
The first element of task2
merge : function
Takes ``task1`` and ``task2`` and returns a merged task to
replace ``task1``.
Examples
--------
>>> def load(store, partition, columns):
... pass
>>> dsk = {'x': (load, 'store', 'part', ['a', 'b']),
... 'y': (getitem, 'x', 'a')}
>>> merge = lambda t1, t2: (load, t2[1], t2[2], t1[2])
>>> dsk2 = fuse_selections(dsk, getitem, load, merge)
>>> cull(dsk2, 'y')[0]
{'y': (<function load at ...>, 'store', 'part', 'a')}
"""
dsk2 = dict()
for k, v in dsk.items():
try:
if (istask(v) and v[0] == head1 and v[1] in dsk and
istask(dsk[v[1]]) and dsk[v[1]][0] == head2):
dsk2[k] = merge(v, dsk[v[1]])
else:
dsk2[k] = v
except TypeError:
dsk2[k] = v
return dsk2
def fuse_getitem(dsk, func, place):
""" Fuse getitem with lower operation
Parameters
----------
dsk: dict
dask graph
func: function
A function in a task to merge
place: int
Location in task to insert the getitem key
Examples
--------
>>> def load(store, partition, columns):
... pass
>>> dsk = {'x': (load, 'store', 'part', ['a', 'b']),
... 'y': (getitem, 'x', 'a')}
>>> dsk2 = fuse_getitem(dsk, load, 3) # columns in arg place 3
>>> cull(dsk2, 'y')[0]
{'y': (<function load at ...>, 'store', 'part', 'a')}
"""
return fuse_selections(dsk, getitem, func,
lambda a, b: tuple(b[:place]) + (a[2], ) + tuple(b[place + 1:]))
# Defining `key_split` (used by `default_fused_keys_renamer`) in utils.py
# results in messy circular imports, so define it here instead.
hex_pattern = re.compile('[a-f]+')
def key_split(s):
"""
>>> key_split('x')
u'x'
>>> key_split('x-1')
u'x'
>>> key_split('x-1-2-3')
u'x'
>>> key_split(('x-2', 1))
'x'
>>> key_split("('x-2', 1)")
u'x'
>>> key_split('hello-world-1')
u'hello-world'
>>> key_split(b'hello-world-1')
u'hello-world'
>>> key_split('ae05086432ca935f6eba409a8ecd4896')
'data'
>>> key_split('<module.submodule.myclass object at 0xdaf372')
u'myclass'
>>> key_split(None)
'Other'
>>> key_split('x-abcdefab') # ignores hex
u'x'
"""
if type(s) is bytes:
s = s.decode()
if type(s) is tuple:
s = s[0]
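    # Keep the leading human-readable words of the dashed name, drop
    # trailing hash-like pieces, map pure 32-character hex ids to 'data',
    # pull the class name out of '<module.Class object at 0x...>' strings,
    # and fall back to 'Other' on any error.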
try:
words = s.split('-')
if not words[0][0].isalpha():
result = words[0].lstrip("'(\"")
else:
result = words[0]
for word in words[1:]:
if word.isalpha() and not (len(word) == 8 and
hex_pattern.match(word) is not None):
result += '-' + word
else:
break
if len(result) == 32 and re.match(r'[a-f0-9]{32}', result):
return 'data'
else:
if result[0] == '<':
result = result.strip('<>').split()[0].split('.')[-1]
return result
except Exception:
return 'Other'
| {
"repo_name": "chrisbarber/dask",
"path": "dask/optimize.py",
"copies": "1",
"size": "22029",
"license": "bsd-3-clause",
"hash": 2286143074462763300,
"line_mean": 29.2181069959,
"line_max": 91,
"alpha_frac": 0.5274411004,
"autogenerated": false,
"ratio": 3.44203125,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.44694723504,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division, print_function)
import re
from qtpy.QtCore import QModelIndex
from qtpy.QtGui import QStandardItem, QStandardItemModel
from qtpy.QtWidgets import QAction
from addie.utilities import customtreeview as base
from addie.widgets.filedialog import get_save_file
from addie.rietveld import event_handler
class BankRegexException(Exception):
""" Exception for bank regex not finding a match"""
pass
class BraggTree(base.CustomizedTreeView):
""" Tree widget to store Bragg workspace """
def __init__(self, parent):
"""
Initialize
Parameters
----------
parent
"""
base.CustomizedTreeView.__init__(self, parent)
# set up actions
self._action_plot = QAction('Plot', self)
self._action_plot.triggered.connect(self.do_plot_ws)
# to python
self._action_ipython = QAction('To IPython', self)
self._action_ipython.triggered.connect(self.do_copy_to_ipython)
# to delete
self._action_delete = QAction('Delete workspace', self)
self._action_delete.triggered.connect(self.do_delete_gsas)
# to merge GSAS file
self._action_merge_gss = QAction('Merge to GSAS', self)
self._action_merge_gss.triggered.connect(self.do_merge_to_gss)
# to select
self._action_select_node = QAction('Plot', self)
self._action_select_node.triggered.connect(self.do_select_gss_node)
# to de-select
self._action_deselect_node = QAction('Remove from plotting', self)
self._action_deselect_node.triggered.connect(self.do_remove_from_plot)
# class variables
self._main_window = None
self._workspaceNameList = None
# set to parent
if parent:
self.set_main_window(parent)
# reset
self.reset_bragg_tree()
def _get_bank_id(self, bank_wksp):
"""Get bank ID from a workspace name with the structure:
Bank 1 - <float for theta angle>
:param bank_wksp: Bank workspace name to strip out bank ID from
:type bank_wksp: str
        :return: Bank ID (the digits captured from the workspace name, as a string)
"""
bank_regex = r"Bank\s+(\d+)\s+-"
m = re.match(bank_regex, bank_wksp)
if m:
bank_id = m.group(1).strip()
else:
msg = "Did not find the bank ID in workspace name: {wksp} "
msg += "when using regular expression: {regex}"
msg = msg.format(wksp=bank_wksp, regex=bank_regex)
raise BankRegexException(msg)
return bank_id
def _get_tree_structure(self, model=None, parent_index=QModelIndex(), spaces=""):
""" Get the Bragg Tree structure information,
such as node names, number of children for each node, etc.
:param model: (optional) Model to print tree structure for
:type model: QAbstractItemModel
:param parent_index: (optional) Parent index to use for printing children of the Model
:type parent_index: QModelIndex
"""
if not model:
model = self.model()
if model.rowCount(parent_index) == 0:
return
for i in range(model.rowCount(parent_index)):
index = model.index(i,0, parent_index)
print("{}{}".format(spaces, model.data(index)))
if model.hasChildren(index):
self._get_tree_structure(model, index, spaces + " |--")
def add_bragg_ws_group(self, ws_group_name, bank_name_list):
"""
Add a workspace group containing a list of bank names as a main node
in the tree
Parameters
----------
ws_group_name
bank_name_list
Returns
-------
"""
# check inputs' validity
msg = 'ws_group_name must be a string but not {}.'
assert isinstance(ws_group_name, str), msg.format(type(ws_group_name))
is_it_a_list = isinstance(bank_name_list, list)
is_list_populated = len(bank_name_list) > 0
is_a_list_and_populated = is_it_a_list and is_list_populated
msg = 'Bank name list must be a non-empty list. Currently is: {}.'
assert is_a_list_and_populated, msg.format(type(bank_name_list))
# main node/leaf
main_leaf_value = str(ws_group_name)
self.add_main_item(main_leaf_value, True, True)
for bank_name in bank_name_list:
print("main_leaf_value:", main_leaf_value, "bank_name:", bank_name)
# add the tree
self.add_child_main_item(main_leaf_value, bank_name)
# register
self._workspaceNameList.append(bank_name)
def add_temp_ws(self, ws_name):
"""
Parameters
----------
ws_name
"""
self.add_child_main_item('workspaces', ws_name)
def do_copy_to_ipython(self):
# TO/NOW - Doc and check
# Get current index and item
current_index = self.currentIndex()
if isinstance(current_index, QModelIndex) is False:
msg = 'Current index is not QModelIndex instance, but {}.'
return False, msg.format(type(current_index))
assert (isinstance(current_index, QModelIndex))
current_item = self.model().itemFromIndex(current_index)
if isinstance(current_item, QStandardItem) is False:
msg = 'Current item is not QStandardItem instance, but {}.'
return False, msg.format(type(current_item))
assert (isinstance(current_item, QStandardItem))
ws_name = str(current_item.text())
python_cmd = "ws = mtd['%s']" % ws_name
if self._main_window is not None:
self._main_window.set_ipython_script(python_cmd)
def do_delete_gsas(self):
"""
Delete a GSAS workspace and its split workspaces,
and its item in the GSAS-tree as well.
"""
# get selected nodes
gsas_node_list = self.get_selected_items()
for gsas_node in gsas_node_list:
# delete the gsas group workspace (deletes sub-workspaces as well)
gsas_name = str(gsas_node.text())
gss_ws_name = gsas_name.split('_group')[0]
self._main_window.get_workflow().delete_workspace(gss_ws_name)
# delete the node from the tree
self.delete_node(gsas_node)
def do_merge_to_gss(self):
"""
Merge a selected GSAS workspace (with split workspaces)
to a new GSAS file
"""
# check prerequisite
assert self._main_window is not None, 'Main window is not set up.'
# get the selected GSAS node's name
status, ret_obj = self.get_current_main_nodes()
if not status:
print('[Error] Get current main nodes: %s.' % str(ret_obj))
return
gss_node_list = ret_obj
if len(gss_node_list) == 0:
return
elif len(gss_node_list) > 1:
msg = '[Error] Only 1 GSS node can be selected.'
msg += 'Current selected nodes are {}.'
print(msg.format(gss_node_list))
return
# pop-out a file dialog for GSAS file's name
file_ext = {'GSAS File (*.gsa)': 'gsa', 'Any File (*.*)': ''}
new_gss_file_name, _ = get_save_file(
self,
caption='New GSAS file name',
directory=self._main_window.get_default_data_dir(),
filter=file_ext)
if not new_gss_file_name: # user pressed cancel
return
# emit the signal to the main window
selected_node = self.get_selected_items()[0]
bank_ws_list = self.get_child_nodes(selected_node, output_str=True)
# write all the banks to a GSAS file
self._main_window.get_workflow().write_gss_file(
ws_name_list=bank_ws_list, gss_file_name=new_gss_file_name)
def do_plot_ws(self):
"""
Add selected runs
:return:
"""
# get the selected items of tree and sort them alphabetically
item_list = self.get_selected_items()
item_list = [str(item.text()) for item in item_list]
item_list.sort()
# FIXME/LATER - replace this by signal
if self._main_window is not None:
print("do_plot_ws: item_list", item_list)
ids = event_handler.get_bragg_banks_selected(self._main_window)
print("do_plot_ws: ids -", ids)
event_handler.plot_bragg(
self._main_window,
ws_list=item_list,
bankIds=ids,
clear_canvas=True)
else:
raise NotImplementedError('Main window has not been set up!')
def do_remove_from_plot(self):
"""
        Remove a node's plot if it is plotted on the canvas
Returns
-------
"""
# get the selected gsas node
selected_nodes = self.get_selected_items()
if len(selected_nodes) == 0:
return
# remove it from canvas
for gss_node in selected_nodes:
gss_ws_name = str(gss_node.text())
gss_bank_names = self.get_child_nodes(gss_node, output_str=True)
self.remove_gss_from_plot(self._main_window,
gss_ws_name,
gss_bank_names)
def do_reset_gsas_tab(self, main_window):
"""
Reset the GSAS-tab including
1. deleting all the GSAS workspaces
2. clearing the GSAS tree
3. clearing GSAS canvas
"""
bragg_list = main_window.calculategr_ui.treeWidget_braggWSList
# delete all workspaces: get GSAS workspaces from tree
gsas_group_node_list = bragg_list.get_main_nodes(output_str=False)
for gsas_group_node in gsas_group_node_list:
# skip if the workspace is 'workspaces'
gss_node_name = str(gsas_group_node.text())
if gss_node_name == 'workspaces':
continue
# get the split workspaces' names and delete
gsas_ws_name_list = bragg_list.get_child_nodes(
gsas_group_node,
output_str=True)
for workspace in gsas_ws_name_list:
main_window._myController.delete_workspace(workspace)
# guess for the main workspace and delete
gss_main_ws = gss_node_name.split('_group')[0]
main_window._myController.delete_workspace(
gss_main_ws, no_throw=True)
# reset the GSAS tree
bragg_list.reset_bragg_tree()
# clear checkboxes for banks
main_window.clear_bank_checkboxes()
# clear the canvas
main_window.rietveld_ui.graphicsView_bragg.reset()
def do_select_gss_node(self):
"""
Select a GSAS node such that this workspace (group)
will be plotted to canvas
Returns
-------
"""
# get selected nodes
selected_nodes = self.get_selected_items()
# set to plot
for gss_group_node in selected_nodes:
gss_group_name = str(gss_group_node.text())
self._main_window.set_bragg_ws_to_plot(gss_group_name)
def get_current_main_nodes(self):
"""
Get the name of the current nodes that are selected
The reason to put the method here is that it is assumed that the tree
        only has 2 levels (main and leaf)
Returns: 2-tuple: boolean, a list of strings as main nodes' names
"""
# Get current index and item
current_index = self.currentIndex()
if isinstance(current_index, QModelIndex) is False:
msg = 'Current index is not QModelIndex instance, but {}.'
return False, msg.format(type(current_index))
assert (isinstance(current_index, QModelIndex))
# Get all selected indexes and get their main-node (or itself)'s name
main_node_list = list()
q_indexes = self.selectedIndexes()
for q_index in q_indexes:
# get item by QIndex
this_item = self.model().itemFromIndex(q_index)
# check
if isinstance(this_item, QStandardItem) is False:
msg = 'Current item is not QStandardItem instance, but {}.'
return False, msg.format(type(this_item))
# get node name of parent's node name
if this_item.parent() is not None:
node_name = str(this_item.parent().text())
else:
node_name = str(this_item.text())
main_node_list.append(node_name)
return True, main_node_list
# override
def mousePressEvent(self, e):
"""
        Override the mouse press event
Parameters
----------
e :: event
Returns
-------
"""
button_pressed = e.button()
if button_pressed == 2:
# override the response for right button
self.pop_up_menu()
else:
# keep base method for other buttons
base.CustomizedTreeView.mousePressEvent(self, e)
def pop_up_menu(self):
"""
Parameters
----------
Returns
-------
"""
selected_items = self.get_selected_items()
if len(selected_items) == 0:
return
leaf_level = -1
for item in selected_items:
if item.parent() is None and leaf_level == -1:
leaf_level = 1
elif item.parent() is not None and leaf_level == -1:
leaf_level = 2
elif item.parent() is None and leaf_level != 1:
print('[Error] Nodes of different levels are selected.')
            elif item.parent() is not None and leaf_level != 2:
print('[Error] Nodes of different levels are selected.')
if leaf_level == 1:
self.removeAction(self._action_plot)
self.addAction(self._action_select_node)
self.addAction(self._action_ipython)
self.addAction(self._action_merge_gss)
self.addAction(self._action_deselect_node)
self.addAction(self._action_delete)
elif leaf_level == 2:
self.addAction(self._action_plot)
self.removeAction(self._action_select_node)
self.removeAction(self._action_merge_gss)
self.addAction(self._action_ipython)
self.removeAction(self._action_deselect_node)
self.removeAction(self._action_delete)
def remove_gss_from_plot(self, main_window, gss_group_name, gss_wksps):
"""Remove a GSAS group from canvas if they exits
:param gss_group_name: name of the GSS node, i.e.,
GSS workspace group's name
:param gss_wksps: list of names of GSS single banks' workspace name
:return:
"""
# checks
msg = 'GSS group workspace name must be a string but not {0}.'
msg = msg.format(type(gss_group_name))
assert isinstance(gss_group_name, str), msg
msg = 'GSAS-single-bank workspace names {0} must be list, not {1}.'
msg = msg.format(gss_wksps, type(gss_wksps))
assert isinstance(gss_wksps, list), msg
if len(gss_wksps) == 0:
raise RuntimeError(
'GSAS-single-bank workspace name list is empty!')
# get bank IDs
bank_ids = list()
for gss_bank_ws in gss_wksps:
bank_id = self._get_bank_id(gss_bank_ws)
bank_ids.append(bank_id)
graphicsView_bragg = main_window.rietveld_ui.graphicsView_bragg
# remove
graphicsView_bragg.remove_gss_banks(gss_group_name, bank_ids)
# check if there is no such bank's plot on figure
# make sure the checkbox is unselected
# turn on the mutex lock
main_window._noEventBankWidgets = True
for bank_id in range(1, 7):
has_plot_on_canvas = len(
graphicsView_bragg.get_ws_name_on_canvas(bank_id)) > 0
main_window._braggBankWidgets[bank_id].setChecked(
has_plot_on_canvas)
# turn off the mutex lock
main_window._noEventBankWidgets = False
def reset_bragg_tree(self):
"""
Clear the leaves of the tree only leaving the main node 'workspaces'
Returns
-------
"""
# clear all
if self.model() is not None:
self.model().clear()
# reset workspace names list
self._workspaceNameList = list()
self._myHeaderList = list()
self._leafDict.clear()
# re-initialize the model
self._myNumCols = 1
model = QStandardItemModel()
model.setColumnCount(self._myNumCols)
self.setModel(model)
self.init_setup(['Bragg Workspaces'])
self.add_main_item('workspaces', append=True, as_current_index=False)
def set_main_window(self, parent_window):
"""
Set the main window (parent window) to this tree
Parameters
----------
parent_window
Returns
-------
"""
# check
assert parent_window is not None, 'Parent window cannot be None'
self._main_window = parent_window
| {
"repo_name": "neutrons/FastGR",
"path": "addie/rietveld/braggtree.py",
"copies": "1",
"size": "17364",
"license": "mit",
"hash": 113451641830986780,
"line_mean": 33.3841584158,
"line_max": 94,
"alpha_frac": 0.5738309145,
"autogenerated": false,
"ratio": 3.893273542600897,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9966690971299872,
"avg_score": 0.00008269716020489657,
"num_lines": 505
} |
from __future__ import absolute_import, division, print_function
import re
from subprocess import check_call, CalledProcessError
from graphviz import Digraph
from .core import istask, get_dependencies, ishashable
from .compatibility import BytesIO
def task_label(task):
"""Label for a task on a dot graph.
Examples
--------
>>> from operator import add
>>> task_label((add, 1, 2))
'add'
>>> task_label((add, (add, 1, 2), 3))
'add(...)'
"""
func = task[0]
if hasattr(func, 'funcs'):
if len(func.funcs) > 1:
return '{0}(...)'.format(funcname(func.funcs[0]))
else:
head = funcname(func.funcs[0])
else:
head = funcname(task[0])
if any(has_sub_tasks(i) for i in task[1:]):
return '{0}(...)'.format(head)
else:
return head
def has_sub_tasks(task):
"""Returns True if the task has sub tasks"""
if istask(task):
return True
elif isinstance(task, list):
return any(has_sub_tasks(i) for i in task)
else:
return False
def funcname(func):
"""Get the name of a function."""
while hasattr(func, 'func'):
func = func.func
return func.__name__
def name(x):
try:
return str(hash(x))
except TypeError:
return str(hash(str(x)))
_HASHPAT = re.compile('([0-9a-z]{32})')
def label(x, cache=None):
"""
>>> label('x')
'x'
>>> label(('x', 1))
"('x', 1)"
>>> from hashlib import md5
>>> x = 'x-%s-hello' % md5(b'1234').hexdigest()
>>> x
'x-81dc9bdb52d04dc20036dbd8313ed055-hello'
>>> label(x)
'x-#-hello'
"""
s = str(x)
m = re.search(_HASHPAT, s)
if m is not None:
for h in m.groups():
if cache is not None:
n = cache.get(h, len(cache))
label = '#{0}'.format(n)
# cache will be overwritten destructively
cache[h] = n
else:
label = '#'
s = s.replace(h, label)
return s
def to_graphviz(dsk, data_attributes=None, function_attributes=None):
if data_attributes is None:
data_attributes = {}
if function_attributes is None:
function_attributes = {}
g = Digraph(graph_attr={'rankdir': 'BT'})
seen = set()
cache = {}
for k, v in dsk.items():
k_name = name(k)
if k_name not in seen:
seen.add(k_name)
g.node(k_name, label=label(k, cache=cache), shape='box',
**data_attributes.get(k, {}))
if istask(v):
func_name = name((k, 'function'))
if func_name not in seen:
seen.add(func_name)
g.node(func_name, label=task_label(v), shape='circle',
**function_attributes.get(k, {}))
g.edge(func_name, k_name)
for dep in get_dependencies(dsk, k):
dep_name = name(dep)
if dep_name not in seen:
seen.add(dep_name)
g.node(dep_name, label=label(dep, cache=cache), shape='box',
**data_attributes.get(dep, {}))
g.edge(dep_name, func_name)
elif ishashable(v) and v in dsk:
g.edge(name(v), k_name)
return g
def dot_graph(dsk, filename='mydask', **kwargs):
g = to_graphviz(dsk, **kwargs)
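    # With a filename, save a .dot file and shell out to the `dot` binary to
    # produce .pdf and .png copies (returning an IPython Image of the .png
    # when possible); with filename=None, render a PNG in memory instead.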
if filename is not None:
g.save(filename + '.dot')
try:
check_call('dot -Tpdf {0}.dot -o {0}.pdf'.format(filename),
shell=True)
check_call('dot -Tpng {0}.dot -o {0}.png'.format(filename),
shell=True)
except CalledProcessError:
msg = ("Please install The `dot` utility from graphviz:\n"
" Debian: sudo apt-get install graphviz\n"
" Mac OSX: brew install graphviz\n"
" Windows: http://www.graphviz.org/Download..php")
raise RuntimeError(msg) # pragma: no cover
try:
from IPython.display import Image
return Image(filename + '.png')
except ImportError:
pass
else:
try:
from IPython.display import Image
s = BytesIO()
s.write(g.pipe(format='png'))
s.seek(0)
return Image(s.read())
except ImportError:
pass
| {
"repo_name": "ssanderson/dask",
"path": "dask/dot.py",
"copies": "1",
"size": "4440",
"license": "bsd-3-clause",
"hash": -4877502341369513000,
"line_mean": 25.5868263473,
"line_max": 80,
"alpha_frac": 0.5137387387,
"autogenerated": false,
"ratio": 3.657331136738056,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46710698754380564,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import re
from toolz import take
from datashape import discover, isdimension
from .compatibility import basestring, map
from .compute.mongo import dispatch
from .resource import resource
try:
import pymongo
from pymongo.collection import Collection
from pymongo import ASCENDING
except ImportError:
Collection = type(None)
__all__ = ['discover', 'drop', 'create_index']
@dispatch(Collection)
def discover(coll, n=50):
items = list(take(n, coll.find()))
for item in items:
del item['_id']
ds = discover(items)
if isdimension(ds[0]):
return coll.count() * ds.subshape[0]
else:
raise ValueError("Consistent datashape not found")
@dispatch(Collection)
def drop(m):
m.drop()
@dispatch(object)
def scrub_keys(o):
"""Add an ascending sort key when pass a string, to make the MongoDB
interface similar to SQL.
"""
raise NotImplementedError("scrub_keys not implemented for type %r" %
type(o).__name__)
@dispatch(basestring)
def scrub_keys(s):
return s, ASCENDING
@dispatch(tuple)
def scrub_keys(t):
return t
@dispatch(list)
def scrub_keys(seq):
for el in seq:
if not isinstance(el, (tuple, basestring)):
raise TypeError('indexing keys must be a string or pair of '
'(<column name>, <parameter>)')
yield scrub_keys(el)
@dispatch(Collection, basestring)
def create_index(coll, key, **kwargs):
coll.create_index(key, **kwargs)
@dispatch(Collection, list)
def create_index(coll, keys, **kwargs):
coll.create_index(list(scrub_keys(keys)), **kwargs)
@resource.register(r'mongodb://\w*:\w*@\w*.*', priority=11)
def resource_mongo_with_authentication(uri, collection_name, **kwargs):
    pattern = r'mongodb://(?P<user>\w*):(?P<pass>\w*)@(?P<hostport>.*:?\d*)/(?P<database>\w+)'
d = re.search(pattern, uri).groupdict()
return _resource_mongo(d, collection_name)
@resource.register('mongodb://.+')
def resource_mongo(uri, collection_name, **kwargs):
    pattern = r'mongodb://(?P<hostport>.*:?\d*)/(?P<database>\w+)'
d = re.search(pattern, uri).groupdict()
return _resource_mongo(d, collection_name)
def _resource_mongo(d, collection_name):
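    # Connect to the host, select the database parsed out of the URI,
    # authenticate when credentials were supplied, and return the requested
    # collection object.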
client = pymongo.MongoClient(d['hostport'])
db = getattr(client, d['database'])
if d.get('user'):
db.authenticate(d['user'], d['pass'])
coll = getattr(db, collection_name)
return coll
| {
"repo_name": "vitan/blaze",
"path": "blaze/mongo.py",
"copies": "1",
"size": "2528",
"license": "bsd-3-clause",
"hash": 1888218301617784800,
"line_mean": 24.5353535354,
"line_max": 93,
"alpha_frac": 0.6499208861,
"autogenerated": false,
"ratio": 3.5159944367176634,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4665915322817663,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import re
import os
from functools import partial
from .compatibility import apply
from .core import istask, get_dependencies, ishashable
from .utils import funcname, import_required
graphviz = import_required("graphviz", "Drawing dask graphs requires the "
"`graphviz` python library and the "
"`graphviz` system library to be "
"installed.")
def task_label(task):
"""Label for a task on a dot graph.
Examples
--------
>>> from operator import add
>>> task_label((add, 1, 2))
'add'
>>> task_label((add, (add, 1, 2), 3))
'add(...)'
"""
func = task[0]
if func is apply:
func = task[1]
if hasattr(func, 'funcs'):
if len(func.funcs) > 1:
return '{0}(...)'.format(funcname(func.funcs[0]))
else:
head = funcname(func.funcs[0])
else:
head = funcname(func)
if any(has_sub_tasks(i) for i in task[1:]):
return '{0}(...)'.format(head)
else:
return head
def has_sub_tasks(task):
"""Returns True if the task has sub tasks"""
if istask(task):
return True
elif isinstance(task, list):
return any(has_sub_tasks(i) for i in task)
else:
return False
def name(x):
try:
return str(hash(x))
except TypeError:
return str(hash(str(x)))
_HASHPAT = re.compile('([0-9a-z]{32})')
_UUIDPAT = re.compile('([0-9a-z]{8}-[0-9a-z]{4}-[0-9a-z]{4}-[0-9a-z]{4}-[0-9a-z]{12})')
def label(x, cache=None):
"""
>>> label('x')
'x'
>>> label(('x', 1))
"('x', 1)"
>>> from hashlib import md5
>>> x = 'x-%s-hello' % md5(b'1234').hexdigest()
>>> x
'x-81dc9bdb52d04dc20036dbd8313ed055-hello'
>>> label(x)
'x-#-hello'
>>> from uuid import uuid1
>>> x = 'x-%s-hello' % uuid1()
>>> x # doctest: +SKIP
'x-4c1a3d7e-0b45-11e6-8334-54ee75105593-hello'
>>> label(x)
'x-#-hello'
"""
s = str(x)
for pattern in (_HASHPAT, _UUIDPAT):
m = re.search(pattern, s)
if m is not None:
for h in m.groups():
if cache is not None:
n = cache.get(h, len(cache))
label = '#{0}'.format(n)
# cache will be overwritten destructively
cache[h] = n
else:
label = '#'
s = s.replace(h, label)
return s
def to_graphviz(dsk, data_attributes=None, function_attributes=None,
                rankdir='BT', graph_attr=None, node_attr=None, edge_attr=None, **kwargs):
if data_attributes is None:
data_attributes = {}
if function_attributes is None:
function_attributes = {}
graph_attr = graph_attr or {}
graph_attr['rankdir'] = rankdir
graph_attr.update(kwargs)
g = graphviz.Digraph(graph_attr=graph_attr,
node_attr=node_attr,
edge_attr=edge_attr)
seen = set()
cache = {}
for k, v in dsk.items():
k_name = name(k)
if k_name not in seen:
seen.add(k_name)
g.node(k_name, label=label(k, cache=cache), shape='box',
**data_attributes.get(k, {}))
if istask(v):
func_name = name((k, 'function'))
if func_name not in seen:
seen.add(func_name)
g.node(func_name, label=task_label(v), shape='circle',
**function_attributes.get(k, {}))
g.edge(func_name, k_name)
for dep in get_dependencies(dsk, k):
dep_name = name(dep)
if dep_name not in seen:
seen.add(dep_name)
g.node(dep_name, label=label(dep, cache=cache), shape='box',
**data_attributes.get(dep, {}))
g.edge(dep_name, func_name)
elif ishashable(v) and v in dsk:
g.edge(name(v), k_name)
return g
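# Illustrative sketch (not part of the original module): build a tiny
# three-key graph and render it to DOT source. Only the `graphviz` Python
# package imported above is needed here; the system-level graphviz binaries
# are only required once the graph is piped to an image format.
def _example_to_graphviz():  # pragma: no cover
    from operator import add
    dsk = {'x': 1,
           'y': (add, 'x', 10),
           'z': (add, 'y', 'x')}
    g = to_graphviz(dsk, rankdir='LR')
    # DOT text with box nodes for data keys and circle nodes for functions.
    return g.source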
IPYTHON_IMAGE_FORMATS = frozenset(['jpeg', 'png'])
IPYTHON_NO_DISPLAY_FORMATS = frozenset(['dot', 'pdf'])
def _get_display_cls(format):
"""
Get the appropriate IPython display class for `format`.
Returns `IPython.display.SVG` if format=='svg', otherwise
`IPython.display.Image`.
If IPython is not importable, return dummy function that swallows its
arguments and returns None.
"""
dummy = lambda *args, **kwargs: None
try:
import IPython.display as display
except ImportError:
# Can't return a display object if no IPython.
return dummy
if format in IPYTHON_NO_DISPLAY_FORMATS:
# IPython can't display this format natively, so just return None.
return dummy
elif format in IPYTHON_IMAGE_FORMATS:
# Partially apply `format` so that `Image` and `SVG` supply a uniform
# interface to the caller.
return partial(display.Image, format=format)
elif format == 'svg':
return display.SVG
else:
raise ValueError("Unknown format '%s' passed to `dot_graph`" % format)
def dot_graph(dsk, filename='mydask', format=None, **kwargs):
"""
Render a task graph using dot.
If `filename` is not None, write a file to disk with that name in the
format specified by `format`. `filename` should not include an extension.
Parameters
----------
dsk : dict
The graph to display.
filename : str or None, optional
The name (without an extension) of the file to write to disk. If
`filename` is None, no file will be written, and we communicate with
dot using only pipes. Default is 'mydask'.
format : {'png', 'pdf', 'dot', 'svg', 'jpeg', 'jpg'}, optional
Format in which to write output file. Default is 'png'.
**kwargs
Additional keyword arguments to forward to `to_graphviz`.
Returns
-------
result : None or IPython.display.Image or IPython.display.SVG (See below.)
Notes
-----
If IPython is installed, we return an IPython.display object in the
requested format. If IPython is not installed, we just return None.
We always return None if format is 'pdf' or 'dot', because IPython can't
display these formats natively. Passing these formats with filename=None
will not produce any useful output.
See Also
--------
dask.dot.to_graphviz
"""
g = to_graphviz(dsk, **kwargs)
fmts = ['.png', '.pdf', '.dot', '.svg', '.jpeg', '.jpg']
if format is None and any(filename.lower().endswith(fmt) for fmt in fmts):
filename, format = os.path.splitext(filename)
format = format[1:].lower()
if format is None:
format = 'png'
data = g.pipe(format=format)
if not data:
raise RuntimeError("Graphviz failed to properly produce an image. "
"This probably means your installation of graphviz "
"is missing png support. See: "
"https://github.com/ContinuumIO/anaconda-issues/"
"issues/485 for more information.")
display_cls = _get_display_cls(format)
if not filename:
return display_cls(data=data)
full_filename = '.'.join([filename, format])
with open(full_filename, 'wb') as f:
f.write(data)
return display_cls(filename=full_filename)
| {
"repo_name": "cpcloud/dask",
"path": "dask/dot.py",
"copies": "3",
"size": "7527",
"license": "bsd-3-clause",
"hash": -1228811108125859600,
"line_mean": 29.2289156627,
"line_max": 87,
"alpha_frac": 0.5601169124,
"autogenerated": false,
"ratio": 3.7522432701894317,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00023880801462673667,
"num_lines": 249
} |
from __future__ import absolute_import, division, print_function
import re
import os
from functools import partial
from .core import istask, get_dependencies, ishashable
from .utils import funcname, import_required
graphviz = import_required("graphviz", "Drawing dask graphs requires the "
"`graphviz` python library and the "
"`graphviz` system library to be "
"installed.")
def task_label(task):
"""Label for a task on a dot graph.
Examples
--------
>>> from operator import add
>>> task_label((add, 1, 2))
'add'
>>> task_label((add, (add, 1, 2), 3))
'add(...)'
"""
func = task[0]
if hasattr(func, 'funcs'):
if len(func.funcs) > 1:
return '{0}(...)'.format(funcname(func.funcs[0]))
else:
head = funcname(func.funcs[0])
else:
head = funcname(task[0])
if any(has_sub_tasks(i) for i in task[1:]):
return '{0}(...)'.format(head)
else:
return head
def has_sub_tasks(task):
"""Returns True if the task has sub tasks"""
if istask(task):
return True
elif isinstance(task, list):
return any(has_sub_tasks(i) for i in task)
else:
return False
def name(x):
try:
return str(hash(x))
except TypeError:
return str(hash(str(x)))
_HASHPAT = re.compile('([0-9a-z]{32})')
_UUIDPAT = re.compile('([0-9a-z]{8}-[0-9a-z]{4}-[0-9a-z]{4}-[0-9a-z]{4}-[0-9a-z]{12})')
def label(x, cache=None):
"""
>>> label('x')
'x'
>>> label(('x', 1))
"('x', 1)"
>>> from hashlib import md5
>>> x = 'x-%s-hello' % md5(b'1234').hexdigest()
>>> x
'x-81dc9bdb52d04dc20036dbd8313ed055-hello'
>>> label(x)
'x-#-hello'
>>> from uuid import uuid1
>>> x = 'x-%s-hello' % uuid1()
>>> x # doctest: +SKIP
'x-4c1a3d7e-0b45-11e6-8334-54ee75105593-hello'
>>> label(x)
'x-#-hello'
"""
s = str(x)
for pattern in (_HASHPAT, _UUIDPAT):
m = re.search(pattern, s)
if m is not None:
for h in m.groups():
if cache is not None:
n = cache.get(h, len(cache))
label = '#{0}'.format(n)
# cache will be overwritten destructively
cache[h] = n
else:
label = '#'
s = s.replace(h, label)
return s
def to_graphviz(dsk, data_attributes=None, function_attributes=None, **kwargs):
if data_attributes is None:
data_attributes = {}
if function_attributes is None:
function_attributes = {}
attributes = {'rankdir': 'BT'}
attributes.update(kwargs)
g = graphviz.Digraph(graph_attr=attributes)
seen = set()
cache = {}
for k, v in dsk.items():
k_name = name(k)
if k_name not in seen:
seen.add(k_name)
g.node(k_name, label=label(k, cache=cache), shape='box',
**data_attributes.get(k, {}))
if istask(v):
func_name = name((k, 'function'))
if func_name not in seen:
seen.add(func_name)
g.node(func_name, label=task_label(v), shape='circle',
**function_attributes.get(k, {}))
g.edge(func_name, k_name)
for dep in get_dependencies(dsk, k):
dep_name = name(dep)
if dep_name not in seen:
seen.add(dep_name)
g.node(dep_name, label=label(dep, cache=cache), shape='box',
**data_attributes.get(dep, {}))
g.edge(dep_name, func_name)
elif ishashable(v) and v in dsk:
g.edge(name(v), k_name)
return g
IPYTHON_IMAGE_FORMATS = frozenset(['jpeg', 'png'])
IPYTHON_NO_DISPLAY_FORMATS = frozenset(['dot', 'pdf'])
def _get_display_cls(format):
"""
Get the appropriate IPython display class for `format`.
Returns `IPython.display.SVG` if format=='svg', otherwise
`IPython.display.Image`.
If IPython is not importable, return dummy function that swallows its
arguments and returns None.
"""
dummy = lambda *args, **kwargs: None
try:
import IPython.display as display
except ImportError:
# Can't return a display object if no IPython.
return dummy
if format in IPYTHON_NO_DISPLAY_FORMATS:
# IPython can't display this format natively, so just return None.
return dummy
elif format in IPYTHON_IMAGE_FORMATS:
# Partially apply `format` so that `Image` and `SVG` supply a uniform
# interface to the caller.
return partial(display.Image, format=format)
elif format == 'svg':
return display.SVG
else:
raise ValueError("Unknown format '%s' passed to `dot_graph`" % format)
def dot_graph(dsk, filename='mydask', format=None, **kwargs):
"""
Render a task graph using dot.
If `filename` is not None, write a file to disk with that name in the
format specified by `format`. `filename` should not include an extension.
Parameters
----------
dsk : dict
The graph to display.
filename : str or None, optional
The name (without an extension) of the file to write to disk. If
`filename` is None, no file will be written, and we communicate with
dot using only pipes. Default is 'mydask'.
format : {'png', 'pdf', 'dot', 'svg', 'jpeg', 'jpg'}, optional
Format in which to write output file. Default is 'png'.
**kwargs
Additional keyword arguments to forward to `to_graphviz`.
Returns
-------
result : None or IPython.display.Image or IPython.display.SVG (See below.)
Notes
-----
If IPython is installed, we return an IPython.display object in the
requested format. If IPython is not installed, we just return None.
We always return None if format is 'pdf' or 'dot', because IPython can't
display these formats natively. Passing these formats with filename=None
will not produce any useful output.
See Also
--------
dask.dot.to_graphviz
"""
g = to_graphviz(dsk, **kwargs)
fmts = ['.png', '.pdf', '.dot', '.svg', '.jpeg', '.jpg']
if format is None and any(filename.lower().endswith(fmt) for fmt in fmts):
filename, format = os.path.splitext(filename)
format = format[1:].lower()
if format is None:
format = 'png'
data = g.pipe(format=format)
if not data:
raise RuntimeError("Graphviz failed to properly produce an image. "
"This probably means your installation of graphviz "
"is missing png support. See: "
"https://github.com/ContinuumIO/anaconda-issues/"
"issues/485 for more information.")
display_cls = _get_display_cls(format)
if not filename:
return display_cls(data=data)
full_filename = '.'.join([filename, format])
with open(full_filename, 'wb') as f:
f.write(data)
return display_cls(filename=full_filename)
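# Illustrative usage sketch (not part of the original module; the rendering
# step requires the graphviz system binaries to be installed):
#
#     >>> from operator import mul                         # doctest: +SKIP
#     >>> dsk = {'a': 2, 'b': (mul, 'a', 3)}               # doctest: +SKIP
#     >>> dot_graph(dsk, filename='mydask', format='svg')  # doctest: +SKIP
#
# This writes 'mydask.svg' in the current working directory and, when run
# inside IPython/Jupyter, also returns an IPython.display.SVG object.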
| {
"repo_name": "cowlicks/dask",
"path": "dask/dot.py",
"copies": "2",
"size": "7248",
"license": "bsd-3-clause",
"hash": -3474547760852108000,
"line_mean": 28.9504132231,
"line_max": 87,
"alpha_frac": 0.5616721854,
"autogenerated": false,
"ratio": 3.7476732161323683,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00019875850941496306,
"num_lines": 242
} |
from __future__ import absolute_import, division, print_function
import re
import os
from functools import partial
from graphviz import Digraph
from .core import istask, get_dependencies, ishashable
from .utils import funcname
def task_label(task):
"""Label for a task on a dot graph.
Examples
--------
>>> from operator import add
>>> task_label((add, 1, 2))
'add'
>>> task_label((add, (add, 1, 2), 3))
'add(...)'
"""
func = task[0]
if hasattr(func, 'funcs'):
if len(func.funcs) > 1:
return '{0}(...)'.format(funcname(func.funcs[0]))
else:
head = funcname(func.funcs[0])
else:
head = funcname(task[0])
if any(has_sub_tasks(i) for i in task[1:]):
return '{0}(...)'.format(head)
else:
return head
def has_sub_tasks(task):
"""Returns True if the task has sub tasks"""
if istask(task):
return True
elif isinstance(task, list):
return any(has_sub_tasks(i) for i in task)
else:
return False
def name(x):
try:
return str(hash(x))
except TypeError:
return str(hash(str(x)))
_HASHPAT = re.compile('([0-9a-z]{32})')
_UUIDPAT = re.compile('([0-9a-z]{8}-[0-9a-z]{4}-[0-9a-z]{4}-[0-9a-z]{4}-[0-9a-z]{12})')
def label(x, cache=None):
"""
>>> label('x')
'x'
>>> label(('x', 1))
"('x', 1)"
>>> from hashlib import md5
>>> x = 'x-%s-hello' % md5(b'1234').hexdigest()
>>> x
'x-81dc9bdb52d04dc20036dbd8313ed055-hello'
>>> label(x)
'x-#-hello'
>>> from uuid import uuid1
>>> x = 'x-%s-hello' % uuid1()
>>> x # doctest: +SKIP
'x-4c1a3d7e-0b45-11e6-8334-54ee75105593-hello'
>>> label(x)
'x-#-hello'
"""
s = str(x)
for pattern in (_HASHPAT, _UUIDPAT):
m = re.search(pattern, s)
if m is not None:
for h in m.groups():
if cache is not None:
n = cache.get(h, len(cache))
label = '#{0}'.format(n)
# cache will be overwritten destructively
cache[h] = n
else:
label = '#'
s = s.replace(h, label)
return s
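# Note on the `cache` parameter (derived from the code above, not part of the
# original docstring): passing a dict makes hash substitutions stable and
# enumerated instead of collapsing every hash to a bare '#', e.g.
#
#     >>> cache = {}
#     >>> label('x-81dc9bdb52d04dc20036dbd8313ed055-hello', cache=cache)
#     'x-#0-hello'
#     >>> sorted(cache.values())
#     [0]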
def to_graphviz(dsk, data_attributes=None, function_attributes=None, **kwargs):
if data_attributes is None:
data_attributes = {}
if function_attributes is None:
function_attributes = {}
attributes = {'rankdir': 'BT'}
attributes.update(kwargs)
g = Digraph(graph_attr=attributes)
seen = set()
cache = {}
for k, v in dsk.items():
k_name = name(k)
if k_name not in seen:
seen.add(k_name)
g.node(k_name, label=label(k, cache=cache), shape='box',
**data_attributes.get(k, {}))
if istask(v):
func_name = name((k, 'function'))
if func_name not in seen:
seen.add(func_name)
g.node(func_name, label=task_label(v), shape='circle',
**function_attributes.get(k, {}))
g.edge(func_name, k_name)
for dep in get_dependencies(dsk, k):
dep_name = name(dep)
if dep_name not in seen:
seen.add(dep_name)
g.node(dep_name, label=label(dep, cache=cache), shape='box',
**data_attributes.get(dep, {}))
g.edge(dep_name, func_name)
elif ishashable(v) and v in dsk:
g.edge(name(v), k_name)
return g
IPYTHON_IMAGE_FORMATS = frozenset(['jpeg', 'png'])
IPYTHON_NO_DISPLAY_FORMATS = frozenset(['dot', 'pdf'])
def _get_display_cls(format):
"""
Get the appropriate IPython display class for `format`.
Returns `IPython.display.SVG` if format=='svg', otherwise
`IPython.display.Image`.
If IPython is not importable, return dummy function that swallows its
arguments and returns None.
"""
dummy = lambda *args, **kwargs: None
try:
import IPython.display as display
except ImportError:
# Can't return a display object if no IPython.
return dummy
if format in IPYTHON_NO_DISPLAY_FORMATS:
# IPython can't display this format natively, so just return None.
return dummy
elif format in IPYTHON_IMAGE_FORMATS:
# Partially apply `format` so that `Image` and `SVG` supply a uniform
# interface to the caller.
return partial(display.Image, format=format)
elif format == 'svg':
return display.SVG
else:
raise ValueError("Unknown format '%s' passed to `dot_graph`" % format)
def dot_graph(dsk, filename='mydask', format=None, **kwargs):
"""
Render a task graph using dot.
If `filename` is not None, write a file to disk with that name in the
format specified by `format`. `filename` should not include an extension.
Parameters
----------
dsk : dict
The graph to display.
filename : str or None, optional
The name (without an extension) of the file to write to disk. If
`filename` is None, no file will be written, and we communicate with
dot using only pipes. Default is 'mydask'.
format : {'png', 'pdf', 'dot', 'svg', 'jpeg', 'jpg'}, optional
Format in which to write output file. Default is 'png'.
**kwargs
Additional keyword arguments to forward to `to_graphviz`.
Returns
-------
result : None or IPython.display.Image or IPython.display.SVG (See below.)
Notes
-----
If IPython is installed, we return an IPython.display object in the
requested format. If IPython is not installed, we just return None.
We always return None if format is 'pdf' or 'dot', because IPython can't
display these formats natively. Passing these formats with filename=None
will not produce any useful output.
See Also
--------
dask.dot.to_graphviz
"""
g = to_graphviz(dsk, **kwargs)
fmts = ['.png', '.pdf', '.dot', '.svg', '.jpeg', '.jpg']
if format is None and any(filename.lower().endswith(fmt) for fmt in fmts):
filename, format = os.path.splitext(filename)
format = format[1:].lower()
if format is None:
format = 'png'
data = g.pipe(format=format)
if not data:
raise RuntimeError("Graphviz failed to properly produce an image. "
"This probably means your installation of graphviz "
"is missing png support. See: "
"https://github.com/ContinuumIO/anaconda-issues/"
"issues/485 for more information.")
display_cls = _get_display_cls(format)
if not filename:
return display_cls(data=data)
full_filename = '.'.join([filename, format])
with open(full_filename, 'wb') as f:
f.write(data)
return display_cls(filename=full_filename)
| {
"repo_name": "mikegraham/dask",
"path": "dask/dot.py",
"copies": "1",
"size": "6972",
"license": "bsd-3-clause",
"hash": -2134006818025583900,
"line_mean": 28.2941176471,
"line_max": 87,
"alpha_frac": 0.5672690763,
"autogenerated": false,
"ratio": 3.688888888888889,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4756157965188889,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import re
import random
from glue.core.component_link import ComponentLink
from glue.core.subset import Subset, SubsetState
from glue.core.data import ComponentID
TAG_RE = re.compile(r'\{\s*(?P<tag>\S+)\s*\}')
__all__ = ['ParsedCommand', 'ParsedSubsetState']
def _ensure_only_component_references(cmd, references):
""" Search through tag references in a command, ensure that
they all reference ComponentIDs
Parameters
----------
cmd : string. A template command
    references : a mapping from tags to substitution objects
Raises
------
TypeError, if cmd does not refer only to ComponentIDs
"""
for match in TAG_RE.finditer(cmd):
tag = match.group('tag')
if tag not in references or not \
isinstance(references[tag], ComponentID):
raise TypeError(
"Reference to %s, which is not a ComponentID" % tag)
def _reference_list(cmd, references):
""" Return a list of the values in the references mapping whose
keys appear in the command
Parameters
----------
cmd : string. A template command
references : a mapping from tags to substitution objects
Returns
-------
A list of the unique values in references that appear in the command
Examples
--------
>>> cmd = '{g} - {r} + {g}'
>>> references = {'g' : g_object, 'r' : r_object, 'i' : i_object}
>>> _reference_list(cmd, references)
[g_object, r_object]
Raises
------
KeyError: if tags in the command aren't in the reference mapping
"""
try:
return list(set(references[m.group('tag')]
for m in TAG_RE.finditer(cmd)))
except KeyError:
raise KeyError("Tags from command not in reference mapping")
def _dereference(cmd, references):
""" Dereference references in the template command, to refer
to objects in the reference mapping
Parameters
----------
cmd : Command string
references : mapping from template tags to objects
Returns
-------
    A new command, where all the tags have been substituted as follows:
"{tag}" -> 'data[references["tag"], __view]', if references[tag] is a ComponentID
"{tag}" -> 'references["tag"].to_mask(__view)' if references[tag] is a Subset
__view is a placeholder variable referencing the view
passed to data.__getitem__ and subset.to_mask
Raises
------
TypeError, if a tag in the command maps to something other than
a ComponentID or Subset object
"""
def sub_func(match):
tag = match.group('tag')
if isinstance(references[tag], ComponentID):
return 'data[references["%s"], __view]' % tag
elif isinstance(references[tag], Subset):
return 'references["%s"].to_mask(__view)' % tag
else:
raise TypeError("Tag %s maps to unrecognized type: %s" %
(tag, type(references[tag])))
return TAG_RE.sub(sub_func, cmd)
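# Worked example (not part of the original module): given
# references = {'g': <ComponentID g>, 'r': <Subset r>}, the template
# '{g} - {r}' dereferences to the string
#
#     'data[references["g"], __view] - references["r"].to_mask(__view)'
#
# which ParsedCommand.evaluate() later hands to eval() with `data`,
# `references` and `__view` in scope.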
def _dereference_random(cmd):
"""
Dereference references in the template command, to refer
to random floating-point values. This is used to quickly test that the
command evaluates without errors.
Parameters
----------
cmd : str
Command string
Returns
-------
    A new command, where all the tags have been substituted by random floating point values
"""
def sub_func(match):
tag = match.group('tag')
return str(random.random())
return TAG_RE.sub(sub_func, cmd)
class InvalidTagError(ValueError):
def __init__(self, tag, references):
msg = ("Tag %s not in reference mapping: %s" %
(tag, sorted(references.keys())))
self.tag = tag
self.references = references
super(InvalidTagError, self).__init__(msg)
def _validate(cmd, references):
""" Make sure all references in the command are in the reference mapping
Raises
------
    InvalidTagError, if a tag is missing from references
"""
for match in TAG_RE.finditer(cmd):
tag = match.group('tag')
if tag not in references:
raise InvalidTagError(tag, references)
class ParsedCommand(object):
""" Class to manage commands that define new components and subsets """
def __init__(self, cmd, references):
""" Create a new parsed command object
Parameters
----------
cmd : str. A template command. Can only reference ComponentID objects
        references : mapping from template tags to substitution objects
"""
_validate(cmd, references)
self._cmd = cmd
self._references = references
def ensure_only_component_references(self):
_ensure_only_component_references(self._cmd, self._references)
@property
def reference_list(self):
return _reference_list(self._cmd, self._references)
def evaluate(self, data, view=None):
from glue import env
# pylint: disable=W0613, W0612
references = self._references
cmd = _dereference(self._cmd, self._references)
scope = vars(env)
scope['__view'] = view
global_variables = vars(env)
# We now import math modules if not already defined in local or
# global variables
if 'numpy' not in global_variables and 'numpy' not in locals():
import numpy
if 'np' not in global_variables and 'np' not in locals():
import numpy as np
if 'math' not in global_variables and 'math' not in locals():
import math
return eval(cmd, global_variables, locals()) # careful!
def evaluate_test(self, view=None):
from glue import env
cmd = _dereference_random(self._cmd)
scope = vars(env)
scope['__view'] = view
global_variables = vars(env)
# We now import math modules if not already defined in local or
# global variables
if 'numpy' not in global_variables and 'numpy' not in locals():
import numpy
if 'np' not in global_variables and 'np' not in locals():
import numpy as np
if 'math' not in global_variables and 'math' not in locals():
import math
return eval(cmd, global_variables, locals()) # careful!
def __gluestate__(self, context):
return dict(cmd=self._cmd,
references=dict((k, context.id(v))
for k, v in self._references.items()))
@classmethod
def __setgluestate__(cls, rec, context):
cmd = rec['cmd']
ref = dict((k, context.object(v))
for k, v in rec['references'].items())
return cls(cmd, ref)
class ParsedComponentLink(ComponentLink):
""" Class to create a new ComponentLink from a ParsedCommand object. """
def __init__(self, to_, parsed):
""" Create a new link
Parameters
----------
to_ : ComponentID instance to associate with the new component
parsed : A ParsedCommand object
"""
parsed.ensure_only_component_references()
super(ParsedComponentLink, self).__init__(
parsed.reference_list, to_, lambda: None)
self._parsed = parsed
def compute(self, data, view=None):
return self._parsed.evaluate(data, view)
def __gluestate__(self, context):
return dict(parsed=context.do(self._parsed),
to=context.id(self.get_to_id()))
@classmethod
def __setgluestate__(cls, rec, context):
return cls(context.object(rec['to']),
context.object(rec['parsed']))
class ParsedSubsetState(SubsetState):
""" A SubsetState defined by a ParsedCommand object """
def __init__(self, parsed):
""" Create a new object
Parameters
----------
parsed : A ParsedCommand object
"""
super(ParsedSubsetState, self).__init__()
self._parsed = parsed
def to_mask(self, data, view=None):
""" Calculate the new mask by evaluating the dereferenced command """
result = self._parsed.evaluate(data)
if view is not None:
result = result[view]
return result
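# --- Illustrative usage sketch (not part of the original module) -----------
# The Data object and component names below are hypothetical; the sketch
# assumes a glue Data instance with numerical components 'a' and 'b'.
#
#     >>> from glue.core import Data                              # doctest: +SKIP
#     >>> data = Data(a=[1, 2, 3], b=[10, 20, 30], label='demo')  # doctest: +SKIP
#     >>> cmd = ParsedCommand('{a} + {b}',
#     ...                     {'a': data.id['a'], 'b': data.id['b']})
#     >>> link = ParsedComponentLink(ComponentID('a_plus_b'), cmd)
#     >>> data.add_component_link(link)                           # doctest: +SKIP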
| {
"repo_name": "saimn/glue",
"path": "glue/core/parse.py",
"copies": "3",
"size": "8323",
"license": "bsd-3-clause",
"hash": -8315131398652751000,
"line_mean": 29.4871794872,
"line_max": 87,
"alpha_frac": 0.6027874564,
"autogenerated": false,
"ratio": 4.357591623036649,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6460379079436649,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import re
import six
import pandas as pd
import numpy as np
__all__ = ['to_datetime']
# Compatibility functions for older pandas versions.
if tuple(map(int, pd.__version__.split('.')[:2])) < (0, 17):
def _pd_to_datetime_coerce(arg):
return pd.to_datetime(arg, coerce=True)
def _pd_to_numeric_coerce(arg):
if not isinstance(arg, pd.Series):
arg = pd.Series(arg)
return arg.convert_objects(
convert_dates=False, convert_numeric=True,
convert_timedeltas=False)
else:
def _pd_to_datetime_coerce(arg):
return pd.to_datetime(arg, errors='coerce')
def _pd_to_numeric_coerce(arg):
return pd.to_numeric(arg, errors='coerce')
def _split_arg(arg):
"""Split a comma-separated string into a list."""
if isinstance(arg, six.string_types):
arg = [it for it in re.split(r'[\s,]+', arg) if it]
return arg
def _extract_series_name(ds):
"""Extract series name from record set."""
m = re.match(r'^\s*([\w\.]+).*$', ds)
return m.group(1) if m is not None else None
def to_datetime(tstr, force=False):
"""
Parse JSOC time strings.
In general, this is quite complicated, because of the many
different (non-standard) time strings supported by the DRMS. For
more (much more!) details on this matter, see
`Rick Bogart's notes <http://jsoc.stanford.edu/doc/timerep.html>`__.
The current implementation only tries to convert typical HMI time
strings, with a format like "%Y.%m.%d_%H:%M:%S_TAI", to an ISO time
    string, which is then parsed by pandas. Note that "_TAI", as well as
    other timezone identifiers like "Z", will not be taken into
account, so the result will be a naive timestamp without any
associated timezone.
    If you know the time string format, it might be better to call
pandas.to_datetime() directly. For handling TAI timestamps, e.g.
converting between TAI and UTC, the astropy.time package can be
used.
Parameters
----------
tstr : string or list/Series of strings
DateTime strings.
force : bool
Set to True to omit the endswith('_TAI') check.
Returns
-------
result : pandas.Series or pandas.Timestamp
Pandas series or a single Timestamp object.
"""
s = pd.Series(tstr).astype(str)
if force or s.str.endswith('_TAI').any():
s = s.str.replace('_TAI', '')
s = s.str.replace('_', ' ')
s = s.str.replace('.', '-', n=2)
res = _pd_to_datetime_coerce(s)
res = res.dt.tz_localize(None) # remove any timezone information
return res.iloc[0] if (len(res) == 1) and np.isscalar(tstr) else res
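# Illustrative example (not part of the original module): a typical HMI
# "_TAI" time string is normalized and parsed into a naive pandas Timestamp.
#
#     >>> to_datetime('2014.01.15_12:00:00_TAI')   # doctest: +SKIP
#     Timestamp('2014-01-15 12:00:00')
#
# A list (or Series) of strings returns a pandas.Series of Timestamps
# instead of a single scalar value.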
| {
"repo_name": "kbg/drms",
"path": "drms/utils.py",
"copies": "1",
"size": "2740",
"license": "mit",
"hash": 2826334684050906600,
"line_mean": 32.0120481928,
"line_max": 72,
"alpha_frac": 0.6375912409,
"autogenerated": false,
"ratio": 3.586387434554974,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4723978675454974,
"avg_score": null,
"num_lines": null
} |