text | meta
---|---|
from __future__ import absolute_import, division, print_function
from collections import Iterator
import math
import uuid
import numpy as np
import pandas as pd
from pandas.core.categorical import is_categorical_dtype
from toolz import merge
from ..optimize import cull
from ..base import tokenize
from .core import DataFrame, Series, _Frame, map_partitions, _concat
from dask.dataframe.categorical import (strip_categories, _categorize,
get_categories)
from .utils import shard_df_on_index
from ..utils import digit, insert
def set_index(df, index, npartitions=None, method=None, compute=True,
drop=True, **kwargs):
""" Set DataFrame index to new column
    Sorts index and realigns DataFrame to new sorted order.
This shuffles and repartitions your data. If done in parallel the
resulting order is non-deterministic.
"""
if isinstance(index, (DataFrame, tuple, list)):
raise NotImplementedError(
"Dask dataframe does not yet support multi-indexes.\n"
"You tried to index with this index: %s\n"
"Indexes must be single columns only." % str(index))
npartitions = npartitions or df.npartitions
if not isinstance(index, Series):
index2 = df[index]
else:
index2 = index
divisions = (index2
.quantile(np.linspace(0, 1, npartitions + 1))
.compute()).tolist()
return set_partition(df, index, divisions, compute=compute,
method=method, drop=drop, **kwargs)
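# Editor's note (hedged sketch, not part of the original source): ``set_index``
# derives its divisions from approximate quantiles of the new index column.
# With npartitions=2, for example, the 0%, 50% and 100% quantiles of the index
# become the three division boundaries handed to ``set_partition`` below.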
def new_categories(categories, index):
""" Flop around index for '.index' """
if index in categories:
categories = categories.copy()
categories['.index'] = categories.pop(index)
return categories
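# Illustrative example (added comment): if the categories mapping contains the
# column that is about to become the index, its entry is re-keyed under
# '.index', e.g. new_categories({'id': ['a', 'b']}, 'id') == {'.index': ['a', 'b']}.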
def set_partition(df, index, divisions, method=None, compute=False, drop=True,
max_branch=32, **kwargs):
""" Group DataFrame by index
Sets a new index and partitions data along that index according to
divisions. Divisions are often found by computing approximate quantiles.
The function ``set_index`` will do both of these steps.
Parameters
----------
df: DataFrame/Series
Data that we want to re-partition
index: string or Series
Column to become the new index
divisions: list
Values to form new divisions between partitions
drop: bool, default True
Whether to delete columns to be used as the new index
method: str (optional)
Either 'disk' for an on-disk shuffle or 'tasks' to use the task
scheduling framework. Use 'disk' if you are on a single machine
and 'tasks' if you are on a distributed cluster.
max_branch: int (optional)
If using the task-based shuffle, the amount of splitting each
partition undergoes. Increase this for fewer copies but more
scheduler overhead.
See Also
--------
set_index
shuffle
partd
"""
if method is None:
method = 'disk'
if method == 'disk':
return set_partition_disk(df, index, divisions, compute=compute,
drop=drop, **kwargs)
elif method == 'tasks':
return set_partition_tasks(df, index, divisions,
max_branch=max_branch, drop=drop)
else:
raise NotImplementedError("Unknown method %s" % method)
def barrier(args):
list(args)
return 0
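# Descriptive comment (added): ``barrier`` merely consumes its arguments, so any
# task that depends on it cannot start until every upstream partition task has
# written its shards to partd; the return value (0) is never used.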
def _set_partition(df, index, divisions, p, drop=True):
""" Shard partition and dump into partd """
df = df.set_index(index, drop=drop)
divisions = list(divisions)
shards = shard_df_on_index(df, divisions[1:-1])
shards = list(map(strip_categories, shards))
p.append(dict(enumerate(shards)))
def _set_collect(group, p, barrier_token, columns):
""" Get new partition dataframe from partd """
try:
return p.get(group)
except ValueError:
assert columns is not None, columns
# when unable to get group, create dummy DataFrame
# which has the same columns as original
return pd.DataFrame(columns=columns)
def shuffle(df, index, npartitions=None):
""" Group DataFrame by index
Hash grouping of elements. After this operation all elements that have
the same index will be in the same partition. Note that this requires
full dataset read, serialization and shuffle. This is expensive. If
possible you should avoid shuffles.
This does not preserve a meaningful index/partitioning scheme. This is not
deterministic if done in parallel.
See Also
--------
set_index
set_partition
partd
"""
if isinstance(index, _Frame):
assert df.divisions == index.divisions
if npartitions is None:
npartitions = df.npartitions
token = tokenize(df, index, npartitions)
always_new_token = uuid.uuid1().hex
import partd
p = ('zpartd-' + always_new_token,)
dsk1 = {p: (partd.PandasBlocks, (partd.Buffer, (partd.Dict,),
(partd.File,)))}
# Partition data on disk
name = 'shuffle-partition-' + always_new_token
if isinstance(index, _Frame):
dsk2 = dict(((name, i),
(partition, part, ind, npartitions, p))
for i, (part, ind)
in enumerate(zip(df._keys(), index._keys())))
else:
dsk2 = dict(((name, i),
(partition, part, index, npartitions, p))
for i, part
in enumerate(df._keys()))
# Barrier
barrier_token = 'barrier-' + always_new_token
dsk3 = {barrier_token: (barrier, list(dsk2))}
# Collect groups
name = 'shuffle-collect-' + token
meta = df._pd
dsk4 = dict(((name, i),
(collect, i, p, meta, barrier_token))
for i in range(npartitions))
divisions = [None] * (npartitions + 1)
dsk = merge(df.dask, dsk1, dsk2, dsk3, dsk4)
if isinstance(index, _Frame):
dsk.update(index.dask)
return DataFrame(dsk, name, df.columns, divisions)
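# Descriptive comment (added): the resulting divisions are all ``None`` because
# hash partitioning gives no ordering guarantee; only co-location of equal index
# values is preserved, not a sorted index.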
def partitioning_index(df, npartitions):
"""Computes a deterministic index mapping each record to a partition.
Identical rows are mapped to the same partition.
Parameters
----------
df : DataFrame/Series/Index
npartitions : int
The number of partitions to group into.
Returns
-------
partitions : ndarray
An array of int64 values mapping each record to a partition.
"""
if isinstance(df, (pd.Series, pd.Index)):
h = hash_series(df).astype('int64')
elif isinstance(df, pd.DataFrame):
cols = df.iteritems()
h = hash_series(next(cols)[1]).astype('int64')
for _, col in cols:
h = np.multiply(h, 3, h)
h = np.add(h, hash_series(col), h)
else:
raise TypeError("Unexpected type %s" % type(df))
return h % int(npartitions)
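# Hedged usage sketch (added; the helper name and example data are made up):
# the hash is folded column by column as ``h = h * 3 + hash_series(col)`` before
# taking ``h % npartitions``, so identical rows always land in the same bucket.
def _partitioning_index_example():
    example = pd.DataFrame({'x': [1, 2, 1], 'y': ['a', 'b', 'a']})
    parts = partitioning_index(example, npartitions=4)
    # rows 0 and 2 are identical, so they must map to the same partition id
    assert parts[0] == parts[2]
    return parts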
def hash_series(s):
"""Given a series, return a numpy array of deterministic integers."""
vals = s.values
dt = vals.dtype
if is_categorical_dtype(dt):
return vals.codes
elif np.issubdtype(dt, np.integer):
return vals
elif np.issubdtype(dt, np.floating):
return np.nan_to_num(vals).astype('int64')
elif dt == np.bool:
return vals.view('int8')
elif np.issubdtype(dt, np.datetime64) or np.issubdtype(dt, np.timedelta64):
return vals.view('int64')
else:
return s.apply(hash).values
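# Descriptive comment (added): each dtype branch above reuses values that are
# already integer-like (category codes, datetime/bool bit patterns, floats via
# nan_to_num); only object dtypes fall back to Python's ``hash`` per element,
# which is considerably slower.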
def partition(df, index, npartitions, p):
""" Partition a dataframe along a grouper, store partitions to partd """
rng = pd.Series(np.arange(len(df)))
if isinstance(index, Iterator):
index = list(index)
if not isinstance(index, (pd.Index, pd.Series, pd.DataFrame)):
index = df[index]
groups = rng.groupby(partitioning_index(index, npartitions))
d = dict((i, df.iloc[groups.groups[i]]) for i in range(npartitions)
if i in groups.groups)
p.append(d)
def collect(group, p, meta, barrier_token):
""" Collect partitions from partd, yield dataframes """
res = p.get(group)
return res if len(res) > 0 else meta
def shuffle_pre_partition(df, index, divisions, drop):
if np.isscalar(index):
ind = df[index]
else:
ind = index
parts = pd.Series(divisions).searchsorted(ind, side='right') - 1
parts[(ind == divisions[-1]).values] = len(divisions) - 2
result = (df.assign(partitions=parts, new_index=ind)
.set_index('partitions', drop=drop))
return result
def shuffle_pre_partition_scalar(df, index, divisions, drop):
ind = df[index]
parts = pd.Series(divisions).searchsorted(ind, side='right') - 1
parts[(ind == divisions[-1]).values] = len(divisions) - 2
result = (df.assign(partitions=parts)
.set_index('partitions', drop=drop))
return result
def shuffle_pre_partition_series(df, index, divisions, drop):
parts = pd.Series(divisions).searchsorted(index, side='right') - 1
parts[(index == divisions[-1]).values] = len(divisions) - 2
result = (df.assign(partitions=parts, new_index=index)
.set_index('partitions', drop=drop))
return result
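# Descriptive comment (added): ``searchsorted(..., side='right') - 1`` maps each
# index value onto the division interval containing it, and values exactly equal
# to the last division are clamped into the final partition
# (``len(divisions) - 2``) instead of spilling past the end.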
def shuffle_group(df, stage, k):
if pd.__version__ >= '0.17':
index = df.index // k ** stage % k
else:
values = df.index.values // k ** stage % k
index = df.index.copy()
index.values[:] = values
inds = set(index.drop_duplicates())
df = df.set_index(index)
result = dict(((i, df.loc[i] if i in inds else df.head(0))
for i in range(k)))
if isinstance(df, pd.DataFrame):
result = dict((k, pd.DataFrame(v).transpose()
if isinstance(v, pd.Series) else v)
for k, v in result.items())
return result
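# Illustrative note (added): ``df.index // k ** stage % k`` extracts the
# ``stage``-th base-k digit of each integer partition label, e.g. with k=4 and
# stage=1 a label of 13 gives 13 // 4 % 4 == 3. Routing on one digit per stage
# lets the task-based shuffle reach n output partitions in ceil(log_k n) rounds.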
def shuffle_post_scalar(df, index_name):
return df.set_index(index_name, drop=True)
def shuffle_post_series(df, index_name):
df = df.set_index('new_index', drop=True)
df.index.name = index_name
return df
def set_partition_tasks(df, index, divisions, max_branch=32, drop=True):
max_branch = max_branch or 32
n = df.npartitions
assert len(divisions) == n + 1
stages = int(math.ceil(math.log(n) / math.log(max_branch)))
if stages > 1:
k = int(math.ceil(n ** (1 / stages)))
else:
k = n
groups = []
splits = []
joins = []
inputs = [tuple(digit(i, j, k) for j in range(stages))
for i in range(n)]
sinputs = set(inputs)
if np.isscalar(index):
meta = shuffle_pre_partition_scalar(df._pd, index, divisions, drop)
meta = meta.drop(index, axis=1)
df2 = map_partitions(shuffle_pre_partition_scalar, meta, df, index,
divisions, drop)
else:
meta = df._pd.copy()
meta.index = index._pd
df2 = map_partitions(shuffle_pre_partition_series, meta, df, index,
divisions, drop)
token = tokenize(df, index, divisions, max_branch, drop)
start = dict((('shuffle-join-' + token, 0, inp), (df2._name, i))
for i, inp in enumerate(inputs))
for stage in range(1, stages + 1):
group = dict((('shuffle-group-' + token, stage, inp),
(shuffle_group,
('shuffle-join-' + token, stage - 1, inp),
stage - 1, k))
for inp in inputs)
split = dict((('shuffle-split-' + token, stage, i, inp),
(dict.get, ('shuffle-group-' + token, stage, inp), i, {}))
for i in range(k)
for inp in inputs)
join = dict((('shuffle-join-' + token, stage, inp),
(_concat,
[('shuffle-split-' + token, stage, inp[stage-1],
insert(inp, stage - 1, j)) for j in range(k)
if insert(inp, stage - 1, j) in sinputs]))
for inp in inputs)
groups.append(group)
splits.append(split)
joins.append(join)
if np.isscalar(index):
end = dict((('shuffle-' + token, i),
(shuffle_post_scalar, ('shuffle-join-' + token, stages, inp),
index))
for i, inp in enumerate(inputs))
else:
end = dict((('shuffle-' + token, i),
(shuffle_post_series, ('shuffle-join-' + token, stages, inp),
index.name))
for i, inp in enumerate(inputs))
dsk = merge(df2.dask, start, end, *(groups + splits + joins))
meta = df._pd.set_index(index if np.isscalar(index) else index._pd)
return DataFrame(dsk, 'shuffle-' + token, meta, divisions)
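# Descriptive comment (added): the task-based shuffle above is a staged,
# butterfly-style exchange. Each partition id is spelled as ``stages`` base-k
# digits (see ``inputs``), and every stage groups, splits and re-joins
# partitions on one digit, so the graph grows by roughly n * k tasks per stage
# rather than the n ** 2 tasks a single all-to-all exchange would need.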
def set_partition_disk(df, index, divisions, compute=False, drop=True, **kwargs):
""" Group DataFrame by index using local disk for staging
See Also
--------
partd
"""
if isinstance(index, Series):
assert df.divisions == index.divisions
metadata = df._pd.set_index(index._pd, drop=drop)
elif np.isscalar(index):
metadata = df._pd.set_index(index, drop=drop)
else:
raise ValueError('index must be Series or scalar, {0} given'.format(type(index)))
token = tokenize(df, index, divisions)
always_new_token = uuid.uuid1().hex
import partd
p = ('zpartd-' + always_new_token,)
# Get Categories
catname = 'set-partition--get-categories-old-' + always_new_token
catname2 = 'set-partition--get-categories-new-' + always_new_token
dsk1 = {catname: (get_categories, df._keys()[0]),
p: (partd.PandasBlocks, (partd.Buffer, (partd.Dict,), (partd.File,))),
catname2: (new_categories, catname,
index.name if isinstance(index, Series) else index)}
# Partition data on disk
name = 'set-partition--partition-' + always_new_token
if isinstance(index, _Frame):
dsk2 = dict(((name, i),
(_set_partition, part, ind, divisions, p, drop))
for i, (part, ind)
in enumerate(zip(df._keys(), index._keys())))
else:
dsk2 = dict(((name, i),
(_set_partition, part, index, divisions, p, drop))
for i, part
in enumerate(df._keys()))
# Barrier
barrier_token = 'barrier-' + always_new_token
dsk3 = {barrier_token: (barrier, list(dsk2))}
if compute:
dsk = merge(df.dask, dsk1, dsk2, dsk3)
if isinstance(index, _Frame):
dsk.update(index.dask)
p, barrier_token, categories = df._get(dsk, [p, barrier_token, catname2], **kwargs)
dsk4 = {catname2: categories}
else:
dsk4 = {}
# Collect groups
name = 'set-partition--collect-' + token
if compute and not categories:
dsk4.update(dict(((name, i),
(_set_collect, i, p, barrier_token, df.columns))
for i in range(len(divisions) - 1)))
else:
dsk4.update(dict(((name, i),
(_categorize, catname2,
(_set_collect, i, p, barrier_token, df.columns)))
for i in range(len(divisions) - 1)))
dsk = merge(df.dask, dsk1, dsk2, dsk3, dsk4)
if isinstance(index, Series):
dsk.update(index.dask)
if compute:
dsk, _ = cull(dsk, list(dsk4.keys()))
return DataFrame(dsk, name, metadata, divisions)
| {
"repo_name": "mikegraham/dask",
"path": "dask/dataframe/shuffle.py",
"copies": "1",
"size": "15669",
"license": "bsd-3-clause",
"hash": 5250085827925435000,
"line_mean": 32.5524625268,
"line_max": 91,
"alpha_frac": 0.585295807,
"autogenerated": false,
"ratio": 3.8161227471992207,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49014185541992206,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from collections import Iterator
import operator
import uuid
try:
from cytoolz import curry, first
except ImportError:
from toolz import curry, first
from . import base, threaded
from .compatibility import apply
from .core import quote
from .context import _globals, defer_to_globals
from .optimize import dont_optimize
from .utils import concrete, funcname, methodcaller, ensure_dict
from . import sharedict
__all__ = ['Delayed', 'delayed']
def unzip(ls, nout):
"""Unzip a list of lists into ``nout`` outputs."""
out = list(zip(*ls))
if not out:
out = [()] * nout
return out
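# Illustrative example (added comment):
#     unzip([(1, 'a'), (2, 'b')], 2)  ->  [(1, 2), ('a', 'b')]
#     unzip([], 2)                    ->  [(), ()]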
def to_task_dask(expr):
"""Normalize a python object and merge all sub-graphs.
- Replace ``Delayed`` with their keys
- Convert literals to things the schedulers can handle
- Extract dask graphs from all enclosed values
Parameters
----------
expr : object
The object to be normalized. This function knows how to handle
``Delayed``s, as well as most builtin python types.
Returns
-------
task : normalized task to be run
dask : a merged dask graph that forms the dag for this task
Examples
--------
>>> a = delayed(1, 'a')
>>> b = delayed(2, 'b')
>>> task, dask = to_task_dask([a, b, 3])
>>> task # doctest: +SKIP
['a', 'b', 3]
>>> dict(dask) # doctest: +SKIP
{'a': 1, 'b': 2}
>>> task, dasks = to_task_dask({a: 1, b: 2})
>>> task # doctest: +SKIP
(dict, [['a', 1], ['b', 2]])
>>> dict(dask) # doctest: +SKIP
{'a': 1, 'b': 2}
"""
if isinstance(expr, Delayed):
return expr.key, expr.dask
if isinstance(expr, base.Base):
name = 'finalize-' + tokenize(expr, pure=True)
keys = expr._keys()
dsk = expr._optimize(ensure_dict(expr.dask), keys)
dsk[name] = (expr._finalize, (concrete, keys))
return name, dsk
if isinstance(expr, tuple) and type(expr) != tuple:
return expr, {}
if isinstance(expr, (Iterator, list, tuple, set)):
args, dasks = unzip((to_task_dask(e) for e in expr), 2)
args = list(args)
dsk = sharedict.merge(*dasks)
# Ensure output type matches input type
if isinstance(expr, (tuple, set)):
return (type(expr), args), dsk
else:
return args, dsk
if type(expr) is dict:
args, dsk = to_task_dask([[k, v] for k, v in expr.items()])
return (dict, args), dsk
return expr, {}
def tokenize(*args, **kwargs):
"""Mapping function from task -> consistent name.
Parameters
----------
args : object
Python objects that summarize the task.
pure : boolean, optional
If True, a consistent hash function is tried on the input. If this
fails, then a unique identifier is used. If False (default), then a
unique identifier is always used.
"""
pure = kwargs.pop('pure', None)
if pure is None:
pure = _globals.get('delayed_pure', False)
if pure:
return base.tokenize(*args, **kwargs)
else:
return str(uuid.uuid4())
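# Illustrative note (added): with ``pure=True`` identical arguments always hash
# to the same key, which lets repeated calls share work; the default path
# returns a fresh uuid4 each time (assuming ``delayed_pure`` is unset), e.g.
#     tokenize(1, 2, pure=True) == tokenize(1, 2, pure=True)   # True
#     tokenize(1, 2) == tokenize(1, 2)                         # False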
@curry
def delayed(obj, name=None, pure=None, nout=None, traverse=True):
"""Wraps a function or object to produce a ``Delayed``.
``Delayed`` objects act as proxies for the object they wrap, but all
operations on them are done lazily by building up a dask graph internally.
Parameters
----------
obj : object
The function or object to wrap
name : string or hashable, optional
The key to use in the underlying graph for the wrapped object. Defaults
to hashing content.
pure : bool, optional
Indicates whether calling the resulting ``Delayed`` object is a pure
operation. If True, arguments to the call are hashed to produce
deterministic keys. If not provided, the default is to check the global
``delayed_pure`` setting, and fallback to ``False`` if unset.
nout : int, optional
The number of outputs returned from calling the resulting ``Delayed``
object. If provided, the ``Delayed`` output of the call can be iterated
into ``nout`` objects, allowing for unpacking of results. By default
        iteration over ``Delayed`` objects will error. Note that ``nout=1``
        expects ``obj`` to return a tuple of length 1, and consequently for
        ``nout=0``, ``obj`` should return an empty tuple.
traverse : bool, optional
By default dask traverses builtin python collections looking for dask
objects passed to ``delayed``. For large collections this can be
expensive. If ``obj`` doesn't contain any dask objects, set
``traverse=False`` to avoid doing this traversal.
Examples
--------
Apply to functions to delay execution:
>>> def inc(x):
... return x + 1
>>> inc(10)
11
>>> x = delayed(inc, pure=True)(10)
>>> type(x) == Delayed
True
>>> x.compute()
11
Can be used as a decorator:
>>> @delayed(pure=True)
... def add(a, b):
... return a + b
>>> add(1, 2).compute()
3
``delayed`` also accepts an optional keyword ``pure``. If False, then
subsequent calls will always produce a different ``Delayed``. This is
useful for non-pure functions (such as ``time`` or ``random``).
>>> from random import random
>>> out1 = delayed(random, pure=False)()
>>> out2 = delayed(random, pure=False)()
>>> out1.key == out2.key
False
If you know a function is pure (output only depends on the input, with no
global state), then you can set ``pure=True``. This will attempt to apply a
consistent name to the output, but will fallback on the same behavior of
``pure=False`` if this fails.
>>> @delayed(pure=True)
... def add(a, b):
... return a + b
>>> out1 = add(1, 2)
>>> out2 = add(1, 2)
>>> out1.key == out2.key
True
Instead of setting ``pure`` as a property of the callable, you can also set
it contextually using the ``delayed_pure`` setting. Note that this
influences the *call* and not the *creation* of the callable:
>>> import dask
>>> @delayed
... def mul(a, b):
... return a * b
>>> with dask.set_options(delayed_pure=True):
... print(mul(1, 2).key == mul(1, 2).key)
True
>>> with dask.set_options(delayed_pure=False):
... print(mul(1, 2).key == mul(1, 2).key)
False
The key name of the result of calling a delayed object is determined by
hashing the arguments by default. To explicitly set the name, you can use
the ``dask_key_name`` keyword when calling the function:
>>> add(1, 2) # doctest: +SKIP
Delayed('add-3dce7c56edd1ac2614add714086e950f')
>>> add(1, 2, dask_key_name='three')
Delayed('three')
Note that objects with the same key name are assumed to have the same
result. If you set the names explicitly you should make sure your key names
are different for different results.
>>> add(1, 2, dask_key_name='three') # doctest: +SKIP
>>> add(2, 1, dask_key_name='three') # doctest: +SKIP
>>> add(2, 2, dask_key_name='four') # doctest: +SKIP
``delayed`` can also be applied to objects to make operations on them lazy:
>>> a = delayed([1, 2, 3])
>>> isinstance(a, Delayed)
True
>>> a.compute()
[1, 2, 3]
The key name of a delayed object is hashed by default if ``pure=True`` or
is generated randomly if ``pure=False`` (default). To explicitly set the
name, you can use the ``name`` keyword:
>>> a = delayed([1, 2, 3], name='mylist')
>>> a
Delayed('mylist')
Delayed results act as a proxy to the underlying object. Many operators
are supported:
>>> (a + [1, 2]).compute()
[1, 2, 3, 1, 2]
>>> a[1].compute()
2
Method and attribute access also works:
>>> a.count(2).compute()
1
Note that if a method doesn't exist, no error will be thrown until runtime:
>>> res = a.not_a_real_method()
>>> res.compute() # doctest: +SKIP
AttributeError("'list' object has no attribute 'not_a_real_method'")
"Magic" methods (e.g. operators and attribute access) are assumed to be
pure, meaning that subsequent calls must return the same results. This is
not overrideable. To invoke an impure attribute or operator, you'd need to
use it in a delayed function with ``pure=False``.
>>> class Incrementer(object):
... def __init__(self):
... self._n = 0
... @property
... def n(self):
... self._n += 1
... return self._n
...
>>> x = delayed(Incrementer())
>>> x.n.key == x.n.key
True
>>> get_n = delayed(lambda x: x.n, pure=False)
>>> get_n(x).key == get_n(x).key
False
In contrast, methods are assumed to be impure by default, meaning that
subsequent calls may return different results. To assume purity, set
`pure=True`. This allows sharing of any intermediate values.
>>> a.count(2, pure=True).key == a.count(2, pure=True).key
True
As with function calls, method calls also respect the global
``delayed_pure`` setting and support the ``dask_key_name`` keyword:
>>> a.count(2, dask_key_name="count_2")
Delayed('count_2')
>>> with dask.set_options(delayed_pure=True):
... print(a.count(2).key == a.count(2).key)
True
"""
if isinstance(obj, Delayed):
return obj
if isinstance(obj, base.Base) or traverse:
task, dsk = to_task_dask(obj)
else:
task = quote(obj)
dsk = {}
if task is obj:
if not (nout is None or (type(nout) is int and nout >= 0)):
raise ValueError("nout must be None or a positive integer,"
" got %s" % nout)
if not name:
try:
prefix = obj.__name__
except AttributeError:
prefix = type(obj).__name__
token = tokenize(obj, nout, pure=pure)
name = '%s-%s' % (prefix, token)
return DelayedLeaf(obj, name, pure=pure, nout=nout)
else:
if not name:
name = '%s-%s' % (type(obj).__name__, tokenize(task, pure=pure))
dsk = sharedict.merge(dsk, (name, {name: task}))
return Delayed(name, dsk)
def right(method):
"""Wrapper to create 'right' version of operator given left version"""
def _inner(self, other):
return method(other, self)
return _inner
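# Illustrative example (added comment): ``right(operator.sub)(2, 10)`` returns
# ``operator.sub(10, 2) == 8``; ``_get_binary_operator`` below uses this
# swapped-argument wrapper when ``inv=True`` to back the reflected operators
# such as ``__rsub__``.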
optimize = defer_to_globals('delayed_optimize', falsey=dont_optimize)(dont_optimize)
class Delayed(base.Base):
"""Represents a value to be computed by dask.
Equivalent to the output from a single key in a dask graph.
"""
__slots__ = ('_key', 'dask', '_length')
_finalize = staticmethod(first)
_default_get = staticmethod(threaded.get)
_optimize = staticmethod(optimize)
def __init__(self, key, dsk, length=None):
self._key = key
if type(dsk) is list: # compatibility with older versions
dsk = sharedict.merge(*dsk)
self.dask = dsk
self._length = length
def __getstate__(self):
return tuple(getattr(self, i) for i in self.__slots__)
def __setstate__(self, state):
for k, v in zip(self.__slots__, state):
setattr(self, k, v)
@property
def key(self):
return self._key
def _keys(self):
return [self.key]
def __repr__(self):
return "Delayed({0})".format(repr(self.key))
def __hash__(self):
return hash(self.key)
def __dir__(self):
return dir(type(self))
def __getattr__(self, attr):
if attr.startswith('_'):
raise AttributeError("Attribute {0} not found".format(attr))
return DelayedAttr(self, attr)
def __setattr__(self, attr, val):
if attr in self.__slots__:
object.__setattr__(self, attr, val)
else:
raise TypeError("Delayed objects are immutable")
def __setitem__(self, index, val):
raise TypeError("Delayed objects are immutable")
def __iter__(self):
if getattr(self, '_length', None) is None:
raise TypeError("Delayed objects of unspecified length are "
"not iterable")
for i in range(self._length):
yield self[i]
def __len__(self):
if getattr(self, '_length', None) is None:
raise TypeError("Delayed objects of unspecified length have "
"no len()")
return self._length
def __call__(self, *args, **kwargs):
pure = kwargs.pop('pure', None)
name = kwargs.pop('dask_key_name', None)
func = delayed(apply, pure=pure)
if name is not None:
return func(self, args, kwargs, dask_key_name=name)
return func(self, args, kwargs)
def __bool__(self):
raise TypeError("Truth of Delayed objects is not supported")
__nonzero__ = __bool__
@classmethod
def _get_binary_operator(cls, op, inv=False):
method = delayed(right(op) if inv else op, pure=True)
return lambda *args, **kwargs: method(*args, **kwargs)
_get_unary_operator = _get_binary_operator
def call_function(func, func_token, args, kwargs, pure=None, nout=None):
dask_key_name = kwargs.pop('dask_key_name', None)
pure = kwargs.pop('pure', pure)
if dask_key_name is None:
name = '%s-%s' % (funcname(func),
tokenize(func_token, *args, pure=pure, **kwargs))
else:
name = dask_key_name
args, dasks = unzip(map(to_task_dask, args), 2)
dsk = sharedict.merge(*dasks)
if kwargs:
dask_kwargs, dsk2 = to_task_dask(kwargs)
dsk.update(dsk2)
task = (apply, func, list(args), dask_kwargs)
else:
task = (func,) + args
dsk.update_with_key({name: task}, key=name)
nout = nout if nout is not None else None
return Delayed(name, dsk, length=nout)
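# Descriptive comment (added): when ``dask_key_name`` is not supplied, the key
# combines the function name with a token over the arguments, so with
# ``pure=True`` two identical calls collapse onto a single task once their
# graphs are merged.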
class DelayedLeaf(Delayed):
__slots__ = ('_obj', '_key', '_pure', '_nout')
def __init__(self, obj, key, pure=None, nout=None):
self._obj = obj
self._key = key
self._pure = pure
self._nout = nout
@property
def dask(self):
return {self._key: self._obj}
def __call__(self, *args, **kwargs):
return call_function(self._obj, self._key, args, kwargs,
pure=self._pure, nout=self._nout)
class DelayedAttr(Delayed):
__slots__ = ('_obj', '_attr', '_key')
def __init__(self, obj, attr):
self._obj = obj
self._attr = attr
self._key = 'getattr-%s' % tokenize(obj, attr, pure=True)
@property
def dask(self):
dsk = {self._key: (getattr, self._obj._key, self._attr)}
return sharedict.merge(self._obj.dask, (self._key, dsk))
def __call__(self, *args, **kwargs):
return call_function(methodcaller(self._attr), self._attr, (self._obj,) + args, kwargs)
for op in [operator.abs, operator.neg, operator.pos, operator.invert,
operator.add, operator.sub, operator.mul, operator.floordiv,
operator.truediv, operator.mod, operator.pow, operator.and_,
operator.or_, operator.xor, operator.lshift, operator.rshift,
operator.eq, operator.ge, operator.gt, operator.ne, operator.le,
operator.lt, operator.getitem]:
Delayed._bind_operator(op)
base.normalize_token.register(Delayed, lambda a: a.key)
| {
"repo_name": "mraspaud/dask",
"path": "dask/delayed.py",
"copies": "1",
"size": "15619",
"license": "bsd-3-clause",
"hash": 6235690452268037000,
"line_mean": 31.006147541,
"line_max": 95,
"alpha_frac": 0.5938920545,
"autogenerated": false,
"ratio": 3.689818095913064,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4783710150413064,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from collections import Iterator
import sys
import numpy as np
import pandas as pd
import toolz
from dask.async import get_sync
def shard_df_on_index(df, divisions):
""" Shard a DataFrame by ranges on its index
Examples
--------
>>> df = pd.DataFrame({'a': [0, 10, 20, 30, 40], 'b': [5, 4 ,3, 2, 1]})
>>> df
a b
0 0 5
1 10 4
2 20 3
3 30 2
4 40 1
>>> shards = list(shard_df_on_index(df, [2, 4]))
>>> shards[0]
a b
0 0 5
1 10 4
>>> shards[1]
a b
2 20 3
3 30 2
>>> shards[2]
a b
4 40 1
>>> list(shard_df_on_index(df, []))[0] # empty case
a b
0 0 5
1 10 4
2 20 3
3 30 2
4 40 1
"""
from dask.dataframe.categorical import iscategorical
if isinstance(divisions, Iterator):
divisions = list(divisions)
if not len(divisions):
yield df
else:
divisions = np.array(divisions)
df = df.sort_index()
index = df.index
if iscategorical(index.dtype):
index = index.as_ordered()
indices = index.searchsorted(divisions)
yield df.iloc[:indices[0]]
for i in range(len(indices) - 1):
yield df.iloc[indices[i]: indices[i+1]]
yield df.iloc[indices[-1]:]
def unique(divisions):
""" Polymorphic unique function
>>> list(unique([1, 2, 3, 1, 2, 3]))
[1, 2, 3]
>>> unique(np.array([1, 2, 3, 1, 2, 3]))
array([1, 2, 3])
>>> unique(pd.Categorical(['Alice', 'Bob', 'Alice'], ordered=False))
[Alice, Bob]
Categories (2, object): [Alice, Bob]
"""
if isinstance(divisions, np.ndarray):
return np.unique(divisions)
if isinstance(divisions, pd.Categorical):
return pd.Categorical.from_codes(np.unique(divisions.codes),
divisions.categories, divisions.ordered)
if isinstance(divisions, (tuple, list, Iterator)):
return tuple(toolz.unique(divisions))
raise NotImplementedError()
###############################################################
# Testing
###############################################################
import pandas.util.testing as tm
from distutils.version import LooseVersion
PANDAS_VERSION = LooseVersion(pd.__version__)
if PANDAS_VERSION >= LooseVersion('0.17.0'):
PANDAS_0170 = True
else:
PANDAS_0170 = False
def _check_dask(dsk, check_names=True):
import dask.dataframe as dd
if hasattr(dsk, 'dask'):
result = dsk.compute(get=get_sync)
if isinstance(dsk, dd.Index):
assert isinstance(result, pd.Index), type(result)
if check_names:
assert dsk.name == result.name
# cache
assert isinstance(dsk._pd, pd.Index), type(dsk._pd)
if check_names:
assert dsk._pd.name == result.name
elif isinstance(dsk, dd.Series):
assert isinstance(result, pd.Series), type(result)
if check_names:
assert dsk.name == result.name, (dsk.name, result.name)
# cache
assert isinstance(dsk._pd, pd.Series), type(dsk._pd)
if check_names:
assert dsk._pd.name == result.name
elif isinstance(dsk, dd.DataFrame):
assert isinstance(result, pd.DataFrame), type(result)
assert isinstance(dsk.columns, pd.Index), type(dsk.columns)
if check_names:
tm.assert_index_equal(dsk.columns, result.columns)
# cache
assert isinstance(dsk._pd, pd.DataFrame), type(dsk._pd)
if check_names:
tm.assert_index_equal(dsk._pd.columns, result.columns)
elif isinstance(dsk, dd.core.Scalar):
assert (np.isscalar(result) or
isinstance(result, (pd.Timestamp, pd.Timedelta)))
else:
msg = 'Unsupported dask instance {0} found'.format(type(dsk))
raise AssertionError(msg)
return result
return dsk
def _maybe_sort(a):
# sort by value, then index
try:
if PANDAS_0170:
if isinstance(a, pd.DataFrame):
a = a.sort_values(by=a.columns.tolist())
else:
a = a.sort_values()
else:
if isinstance(a, pd.DataFrame):
a = a.sort(columns=a.columns.tolist())
else:
a = a.order()
except (TypeError, IndexError, ValueError):
pass
return a.sort_index()
def eq(a, b, check_names=True, **kwargs):
assert_divisions(a)
assert_divisions(b)
assert_sane_keynames(a)
assert_sane_keynames(b)
a = _check_dask(a, check_names=check_names)
b = _check_dask(b, check_names=check_names)
if isinstance(a, pd.DataFrame):
a = _maybe_sort(a)
b = _maybe_sort(b)
tm.assert_frame_equal(a, b, **kwargs)
elif isinstance(a, pd.Series):
a = _maybe_sort(a)
b = _maybe_sort(b)
tm.assert_series_equal(a, b, check_names=check_names, **kwargs)
elif isinstance(a, pd.Index):
tm.assert_index_equal(a, b, **kwargs)
else:
if a == b:
return True
else:
if np.isnan(a):
assert np.isnan(b)
else:
assert np.allclose(a, b)
return True
def assert_dask_graph(dask, label):
if hasattr(dask, 'dask'):
dask = dask.dask
assert isinstance(dask, dict)
for k in dask:
if isinstance(k, tuple):
k = k[0]
if k.startswith(label):
return True
else:
msg = "given dask graph doesn't contan label: {0}"
raise AssertionError(msg.format(label))
def assert_divisions(ddf):
if not hasattr(ddf, 'divisions'):
return
if not hasattr(ddf, 'index'):
return
if not ddf.known_divisions:
return
results = get_sync(ddf.dask, ddf._keys())
for i, df in enumerate(results[:-1]):
if len(df):
assert df.index.min() >= ddf.divisions[i]
assert df.index.max() < ddf.divisions[i + 1]
if len(results[-1]):
assert results[-1].index.min() >= ddf.divisions[-2]
assert results[-1].index.max() <= ddf.divisions[-1]
def assert_sane_keynames(ddf):
if not hasattr(ddf, 'dask'):
return
for k in ddf.dask.keys():
while isinstance(k, tuple):
k = k[0]
assert isinstance(k, (str, bytes))
assert len(k) < 100
assert ' ' not in k
if sys.version_info[0] >= 3:
assert k.split('-')[0].isidentifier()
| {
"repo_name": "mikegraham/dask",
"path": "dask/dataframe/utils.py",
"copies": "1",
"size": "6703",
"license": "bsd-3-clause",
"hash": 5917703457494593000,
"line_mean": 27.5234042553,
"line_max": 75,
"alpha_frac": 0.5449798598,
"autogenerated": false,
"ratio": 3.5131027253668763,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4558082585166876,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from collections import Iterator
import uuid
import numpy as np
import pandas as pd
from toolz import merge
from ..optimize import cull
from ..base import tokenize
from .core import DataFrame, Series, _Frame
from .utils import (strip_categories, shard_df_on_index, _categorize,
get_categories)
def set_index(df, index, npartitions=None, compute=True, **kwargs):
""" Set DataFrame index to new column
    Sorts index and realigns DataFrame to new sorted order.
This shuffles and repartitions your data. If done in parallel the
resulting order is non-deterministic.
"""
if isinstance(index, (DataFrame, tuple, list)):
raise NotImplementedError(
"Dask dataframe does not yet support multi-indexes.\n"
"You tried to index with this index: %s\n"
"Indexes must be single columns only." % str(index))
npartitions = npartitions or df.npartitions
if not isinstance(index, Series):
index2 = df[index]
else:
index2 = index
divisions = (index2
.quantile(np.linspace(0, 1, npartitions+1))
.compute()).tolist()
return set_partition(df, index, divisions, compute=compute, **kwargs)
def new_categories(categories, index):
""" Flop around index for '.index' """
if index in categories:
categories = categories.copy()
categories['.index'] = categories.pop(index)
return categories
def set_partition(df, index, divisions, compute=False, **kwargs):
""" Group DataFrame by index
Sets a new index and partitions data along that index according to
divisions. Divisions are often found by computing approximate quantiles.
The function ``set_index`` will do both of these steps.
Parameters
----------
df: DataFrame/Series
Data that we want to re-partition
index: string or Series
Column to become the new index
divisions: list
Values to form new divisions between partitions
See Also
--------
set_index
shuffle
partd
"""
if isinstance(index, _Frame):
assert df.divisions == index.divisions
columns = df.columns
else:
columns = tuple([c for c in df.columns if c != index])
token = tokenize(df, index, divisions)
always_new_token = uuid.uuid1().hex
import partd
p = ('zpartd-' + always_new_token,)
# Get Categories
catname = 'set-partition--get-categories-old-' + always_new_token
catname2 = 'set-partition--get-categories-new-' + always_new_token
dsk1 = {catname: (get_categories, df._keys()[0]),
p: (partd.PandasBlocks, (partd.Buffer, (partd.Dict,), (partd.File,))),
catname2: (new_categories, catname,
index.name if isinstance(index, Series) else index)}
# Partition data on disk
name = 'set-partition--partition-' + always_new_token
if isinstance(index, _Frame):
dsk2 = dict(((name, i),
(_set_partition, part, ind, divisions, p))
for i, (part, ind)
in enumerate(zip(df._keys(), index._keys())))
else:
dsk2 = dict(((name, i),
(_set_partition, part, index, divisions, p))
for i, part
in enumerate(df._keys()))
# Barrier
barrier_token = 'barrier-' + always_new_token
dsk3 = {barrier_token: (barrier, list(dsk2))}
if compute:
dsk = merge(df.dask, dsk1, dsk2, dsk3)
if isinstance(index, _Frame):
dsk.update(index.dask)
p, barrier_token, categories = df._get(dsk, [p, barrier_token, catname], **kwargs)
dsk4 = {catname2: categories}
else:
dsk4 = {}
# Collect groups
name = 'set-partition--collect-' + token
if compute and not categories:
dsk4.update(dict(((name, i),
(_set_collect, i, p, barrier_token, df.columns))
for i in range(len(divisions) - 1)))
else:
dsk4.update(dict(((name, i),
(_categorize, catname2,
(_set_collect, i, p, barrier_token, df.columns)))
for i in range(len(divisions) - 1)))
dsk = merge(df.dask, dsk1, dsk2, dsk3, dsk4)
if isinstance(index, _Frame):
dsk.update(index.dask)
if compute:
dsk = cull(dsk, list(dsk4.keys()))
return DataFrame(dsk, name, columns, divisions)
def barrier(args):
list(args)
return 0
def _set_partition(df, index, divisions, p):
""" Shard partition and dump into partd """
df = df.set_index(index)
df = strip_categories(df)
divisions = list(divisions)
shards = shard_df_on_index(df, divisions[1:-1])
p.append(dict(enumerate(shards)))
def _set_collect(group, p, barrier_token, columns):
""" Get new partition dataframe from partd """
try:
return p.get(group)
except ValueError:
assert columns is not None, columns
# when unable to get group, create dummy DataFrame
# which has the same columns as original
return pd.DataFrame(columns=columns)
def shuffle(df, index, npartitions=None):
""" Group DataFrame by index
Hash grouping of elements. After this operation all elements that have
the same index will be in the same partition. Note that this requires
full dataset read, serialization and shuffle. This is expensive. If
possible you should avoid shuffles.
This does not preserve a meaningful index/partitioning scheme. This is not
deterministic if done in parallel.
See Also
--------
set_index
set_partition
partd
"""
if isinstance(index, _Frame):
assert df.divisions == index.divisions
if npartitions is None:
npartitions = df.npartitions
token = tokenize(df, index, npartitions)
always_new_token = uuid.uuid1().hex
import partd
p = ('zpartd-' + always_new_token,)
dsk1 = {p: (partd.PandasBlocks, (partd.Buffer, (partd.Dict,),
(partd.File,)))}
# Partition data on disk
name = 'shuffle-partition-' + always_new_token
if isinstance(index, _Frame):
dsk2 = dict(((name, i),
(partition, part, ind, npartitions, p))
for i, (part, ind)
in enumerate(zip(df._keys(), index._keys())))
else:
dsk2 = dict(((name, i),
(partition, part, index, npartitions, p))
for i, part
in enumerate(df._keys()))
# Barrier
barrier_token = 'barrier-' + always_new_token
dsk3 = {barrier_token: (barrier, list(dsk2))}
# Collect groups
name = 'shuffle-collect-' + token
dsk4 = dict(((name, i),
(collect, i, p, barrier_token))
for i in range(npartitions))
divisions = [None] * (npartitions + 1)
dsk = merge(df.dask, dsk1, dsk2, dsk3, dsk4)
if isinstance(index, _Frame):
dsk.update(index.dask)
return DataFrame(dsk, name, df.columns, divisions)
def partition(df, index, npartitions, p):
""" Partition a dataframe along a grouper, store partitions to partd """
rng = pd.Series(np.arange(len(df)))
if isinstance(index, Iterator):
index = list(index)
if not isinstance(index, (pd.Index, pd.Series, pd.DataFrame)):
index = df[index]
if isinstance(index, pd.Index):
groups = rng.groupby([abs(hash(x)) % npartitions for x in index])
if isinstance(index, pd.Series):
groups = rng.groupby(index.map(lambda x: abs(hash(x)) % npartitions).values)
elif isinstance(index, pd.DataFrame):
groups = rng.groupby(index.apply(
lambda row: abs(hash(tuple(row))) % npartitions,
axis=1).values)
d = dict((i, df.iloc[groups.groups[i]]) for i in range(npartitions)
if i in groups.groups)
p.append(d)
def collect(group, p, barrier_token):
""" Collect partitions from partd, yield dataframes """
return p.get(group)
| {
"repo_name": "vikhyat/dask",
"path": "dask/dataframe/shuffle.py",
"copies": "2",
"size": "8223",
"license": "bsd-3-clause",
"hash": 8297773332939162000,
"line_mean": 31.7609561753,
"line_max": 90,
"alpha_frac": 0.5988082208,
"autogenerated": false,
"ratio": 3.8335664335664337,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0013238558970269732,
"num_lines": 251
} |
from __future__ import absolute_import, division, print_function
from collections import Iterator
from dask.delayed import (Delayed, DelayedLeaf, flat_unique, funcname,
to_task_dasks, tokenize, unzip)
from satyr.proxies.messages import Cpus, Disk, Mem
from toolz import curry, merge
from dask.compatibility import apply  # ``apply`` is not a builtin on Python 3
from .executor import get
def to_params(expr, **kwargs):
if isinstance(expr, MesosDelayed):
return expr._params
if isinstance(expr, (Iterator, list, tuple, set)):
params = [to_params(e) for e in expr]
return flat_unique(params)
if isinstance(expr, dict):
params = [to_params(e) for e in expr.values()]
return flat_unique(params)
return []
def to_task_dasks_params(expr, **kwargs):
task, dasks = to_task_dasks(expr, **kwargs)
params = to_params(expr, **kwargs)
return task, dasks, params
@curry
def mesos(obj, name=None, pure=True, cpus=1, mem=64, disk=0,
docker='lensa/dask.mesos', force_pull=False, envs={}, uris=[],
**kwargs):
kwargs['resources'] = [Cpus(cpus), Mem(mem), Disk(disk)]
kwargs['docker'] = docker
kwargs['force_pull'] = force_pull
kwargs['envs'] = envs
kwargs['uris'] = uris
if isinstance(obj, MesosDelayed):
return obj
task, dasks, params = to_task_dasks_params(obj)
if not dasks:
return MesosDelayedLeaf(obj, pure=pure, name=name, **kwargs)
else:
if not name:
name = '%s-%s' % (type(obj).__name__, tokenize(task, pure=pure))
dasks.append({name: task})
params.append({name: kwargs})
return MesosDelayed(name, dasks, params)
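# Descriptive comment (added, interpretation): ``mesos`` mirrors ``delayed`` but
# also records a per-key params entry (cpus, mem, disk, docker image, ...)
# alongside each task; ``MesosDelayed.compute`` forwards the merged params so
# the Mesos-aware ``get`` imported from ``.executor`` can honour them.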
class MesosDelayed(Delayed):
__slots__ = ('_key', '_dasks', '_params')
_default_get = staticmethod(get)
def __init__(self, name, dasks, params):
super(MesosDelayed, self).__init__(name=name, dasks=dasks)
object.__setattr__(self, '_params', params)
def __getstate__(self):
return (self._key, self._dasks, self._params)
@property
def params(self):
return merge(*self._params)
def __getattr__(self, attr):
if not attr.startswith('_'):
return mesos(getattr, pure=True)(self, attr)
else:
raise AttributeError("Attribute {0} not found".format(attr))
def __call__(self, *args, **kwargs):
pure = kwargs.pop('pure', False)
name = kwargs.pop('dask_key_name', None)
func = mesos(apply, pure=pure)
if name is not None:
return func(self, args, kwargs, dask_key_name=name)
return func(self, args, kwargs)
def compute(self, **kwargs):
params = kwargs.pop('params', self.params)
return super(MesosDelayed, self).compute(params=params, **kwargs)
class MesosDelayedLeaf(DelayedLeaf, MesosDelayed):
def __init__(self, obj, name=None, pure=False, **params):
        super(MesosDelayedLeaf, self).__init__(obj, name=name, pure=pure)
object.__setattr__(self, '_params', [{self._key: params}])
def __call__(self, *args, **kwargs):
params = to_params(args)
dask_key_name = kwargs.pop('dask_key_name', None)
pure = kwargs.pop('pure', self.pure)
if dask_key_name is None:
name = (funcname(self._data) + '-' +
tokenize(self._key, *args, pure=pure, **kwargs))
else:
name = dask_key_name
args, dasks, params = unzip(map(to_task_dasks_params, args), 3)
if kwargs:
dask_kwargs, dasks2, params2 = to_task_dasks_params(kwargs)
params = params + (params2,)
dasks = dasks + (dasks2,)
task = (apply, self._data, list(args), dask_kwargs)
else:
task = (self._data,) + args
dasks = flat_unique(dasks)
dasks.append({name: task})
params = flat_unique(params)
params.append({name: self.params[self._key]})
return MesosDelayed(name, dasks, params)
| {
"repo_name": "lensacom/dask.mesos",
"path": "daskos/delayed.py",
"copies": "1",
"size": "3969",
"license": "apache-2.0",
"hash": 8141983443009764000,
"line_mean": 31.2682926829,
"line_max": 76,
"alpha_frac": 0.5938523558,
"autogenerated": false,
"ratio": 3.3778723404255317,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4471724696225532,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from collections import Iterator
from toolz import memoize, first, peek
from datashape import discover, var
from .utils import cls_name, copydoc
from dask.threaded import get as dsk_get
class Chunks(object):
""" An Iterable of chunked data
Iterates over chunks of in-memory data. Contains an iterable or a function
that returns an iterator.
>>> c = Chunks([[1, 2, 3], [4, 5, 6]])
>>> next(iter(c))
[1, 2, 3]
For typed containers see the ``chunks`` function which generates
parametrized Chunks classes.
>>> c = chunks(list)([[1, 2, 3], [4, 5, 6]])
>>> next(iter(c))
[1, 2, 3]
>>> c.container.__name__
'list'
"""
def __init__(self, data):
self.data = data
def __iter__(self):
if callable(self.data):
return self.data()
elif (isinstance(self.data, list) and
len(self.data) and
callable(self.data[0])):
# If this is a set of callables, evaluate
# them using dask before returning an iterator for them
p = []
dsk = {}
for i, f in enumerate(self.data):
dsk['p%d'%i] = (f,)
p.append('p%d'%i)
self.data = dsk_get(dsk, p)
return iter(self.data)
@memoize
@copydoc(Chunks)
def chunks(cls):
""" Parametrized Chunks Class """
return type('chunks(%s)' % cls_name(cls), (Chunks,), {'container': cls})
@discover.register(Chunks)
def discover_chunks(c, **kwargs):
data = c.data
if isinstance(data, Iterator):
fst, c.data = peek(data)
else:
fst = first(c)
return var * discover(fst).subshape[0]
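# Descriptive comment (added): discovery only peeks at the first chunk (without
# consuming it when the data is an iterator) and reports ``var * subshape``,
# i.e. a variable-length collection whose elements look like one chunk's rows.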
| {
"repo_name": "ContinuumIO/odo",
"path": "odo/chunks.py",
"copies": "4",
"size": "1738",
"license": "bsd-3-clause",
"hash": -7073726055771709000,
"line_mean": 25.3333333333,
"line_max": 79,
"alpha_frac": 0.5742232451,
"autogenerated": false,
"ratio": 3.5182186234817814,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.609244186858178,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from collections import Iterator
import numpy as np
import pandas as pd
import toolz
from dask.async import get_sync
def shard_df_on_index(df, divisions):
""" Shard a DataFrame by ranges on its index
Examples
--------
>>> df = pd.DataFrame({'a': [0, 10, 20, 30, 40], 'b': [5, 4 ,3, 2, 1]})
>>> df
a b
0 0 5
1 10 4
2 20 3
3 30 2
4 40 1
>>> shards = list(shard_df_on_index(df, [2, 4]))
>>> shards[0]
a b
0 0 5
1 10 4
>>> shards[1]
a b
2 20 3
3 30 2
>>> shards[2]
a b
4 40 1
>>> list(shard_df_on_index(df, []))[0] # empty case
a b
0 0 5
1 10 4
2 20 3
3 30 2
4 40 1
"""
if isinstance(divisions, Iterator):
divisions = list(divisions)
if not len(divisions):
yield df
else:
divisions = np.array(divisions)
df = df.sort_index()
index = df.index
if iscategorical(index.dtype):
index = index.as_ordered()
indices = index.searchsorted(divisions)
yield df.iloc[:indices[0]]
for i in range(len(indices) - 1):
yield df.iloc[indices[i]: indices[i+1]]
yield df.iloc[indices[-1]:]
def unique(divisions):
""" Polymorphic unique function
>>> list(unique([1, 2, 3, 1, 2, 3]))
[1, 2, 3]
>>> unique(np.array([1, 2, 3, 1, 2, 3]))
array([1, 2, 3])
>>> unique(pd.Categorical(['Alice', 'Bob', 'Alice'], ordered=False))
[Alice, Bob]
Categories (2, object): [Alice, Bob]
"""
if isinstance(divisions, np.ndarray):
return np.unique(divisions)
if isinstance(divisions, pd.Categorical):
return pd.Categorical.from_codes(np.unique(divisions.codes),
divisions.categories, divisions.ordered)
if isinstance(divisions, (tuple, list, Iterator)):
return tuple(toolz.unique(divisions))
raise NotImplementedError()
def _categorize(categories, df):
""" Categorize columns in dataframe
>>> df = pd.DataFrame({'x': [1, 2, 3], 'y': [0, 2, 0]})
>>> categories = {'y': ['A', 'B', 'c']}
>>> _categorize(categories, df)
x y
0 1 A
1 2 c
2 3 A
>>> _categorize(categories, df.y)
0 A
1 c
2 A
dtype: category
Categories (3, object): [A, B, c]
"""
if '.index' in categories:
index = pd.CategoricalIndex(
pd.Categorical.from_codes(df.index.values, categories['.index']))
else:
index = df.index
if isinstance(df, pd.Series):
if df.name in categories:
cat = pd.Categorical.from_codes(df.values, categories[df.name])
return pd.Series(cat, index=index)
else:
return df
else:
return pd.DataFrame(
dict((col, pd.Categorical.from_codes(df[col].values, categories[col])
if col in categories
else df[col].values)
for col in df.columns),
columns=df.columns,
index=index)
def strip_categories(df):
""" Strip categories from dataframe
>>> df = pd.DataFrame({'x': [1, 2, 3], 'y': ['A', 'B', 'A']})
>>> df['y'] = df.y.astype('category')
>>> strip_categories(df)
x y
0 1 0
1 2 1
2 3 0
"""
return pd.DataFrame(dict((col, df[col].cat.codes.values
if iscategorical(df.dtypes[col])
else df[col].values)
for col in df.columns),
columns=df.columns,
index=df.index.codes
if iscategorical(df.index.dtype)
else df.index)
def iscategorical(dt):
return isinstance(dt, pd.core.common.CategoricalDtype)
def get_categories(df):
"""
Get Categories of dataframe
>>> df = pd.DataFrame({'x': [1, 2, 3], 'y': ['A', 'B', 'A']})
>>> df['y'] = df.y.astype('category')
>>> get_categories(df)
{'y': Index([u'A', u'B'], dtype='object')}
"""
result = dict((col, df[col].cat.categories) for col in df.columns
if iscategorical(df.dtypes[col]))
if iscategorical(df.index.dtype):
result['.index'] = df.index.categories
return result
###############################################################
# Testing
###############################################################
import pandas.util.testing as tm
from distutils.version import LooseVersion
PANDAS_VERSION = LooseVersion(pd.__version__)
if PANDAS_VERSION >= LooseVersion('0.17.0'):
PANDAS_0170 = True
else:
PANDAS_0170 = False
def _check_dask(dsk, check_names=True):
import dask.dataframe as dd
if hasattr(dsk, 'dask'):
result = dsk.compute(get=get_sync)
if isinstance(dsk, dd.Index):
assert isinstance(result, pd.Index)
elif isinstance(dsk, dd.Series):
assert isinstance(result, pd.Series)
assert isinstance(dsk.columns, tuple)
assert len(dsk.columns) == 1
if check_names:
assert dsk.name == result.name, (dsk.name, result.name)
elif isinstance(dsk, dd.DataFrame):
assert isinstance(result, pd.DataFrame), type(result)
assert isinstance(dsk.columns, tuple)
if check_names:
columns = pd.Index(dsk.columns)
tm.assert_index_equal(columns, result.columns)
elif isinstance(dsk, dd.core.Scalar):
assert (np.isscalar(result) or
isinstance(result, (pd.Timestamp, pd.Timedelta)))
else:
msg = 'Unsupported dask instance {0} found'.format(type(dsk))
raise AssertionError(msg)
return result
return dsk
def _maybe_sort(a):
# sort by value, then index
try:
if PANDAS_0170:
if isinstance(a, pd.DataFrame):
a = a.sort_values(by=a.columns.tolist())
else:
a = a.sort_values()
else:
if isinstance(a, pd.DataFrame):
a = a.sort(columns=a.columns.tolist())
else:
a = a.order()
except (TypeError, IndexError, ValueError):
pass
return a.sort_index()
def eq(a, b, check_names=True, ignore_index=False):
a = _check_dask(a, check_names=check_names)
b = _check_dask(b, check_names=check_names)
if isinstance(a, pd.DataFrame):
a = _maybe_sort(a)
b = _maybe_sort(b)
tm.assert_frame_equal(a, b)
elif isinstance(a, pd.Series):
a = _maybe_sort(a)
b = _maybe_sort(b)
tm.assert_series_equal(a, b, check_names=check_names)
elif isinstance(a, pd.Index):
tm.assert_index_equal(a, b)
else:
if a == b:
return True
else:
if np.isnan(a):
assert np.isnan(b)
else:
assert np.allclose(a, b)
return True
def assert_dask_graph(dask, label):
if hasattr(dask, 'dask'):
dask = dask.dask
assert isinstance(dask, dict)
for k in dask:
if isinstance(k, tuple):
k = k[0]
if k.startswith(label):
return True
else:
msg = "given dask graph doesn't contan label: {0}"
        raise AssertionError(msg.format(label))
| {
"repo_name": "vikhyat/dask",
"path": "dask/dataframe/utils.py",
"copies": "1",
"size": "7515",
"license": "bsd-3-clause",
"hash": -8145397005283420000,
"line_mean": 27.4696969697,
"line_max": 85,
"alpha_frac": 0.5260146374,
"autogenerated": false,
"ratio": 3.590539894887721,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9609509291219597,
"avg_score": 0.0014090482136248162,
"num_lines": 264
} |
from __future__ import absolute_import, division, print_function
from collections import Iterator
import numpy as np
import pandas as pd
import toolz
def shard_df_on_index(df, divisions):
""" Shard a DataFrame by ranges on its index
Examples
--------
>>> df = pd.DataFrame({'a': [0, 10, 20, 30, 40], 'b': [5, 4 ,3, 2, 1]})
>>> df
a b
0 0 5
1 10 4
2 20 3
3 30 2
4 40 1
>>> shards = list(shard_df_on_index(df, [2, 4]))
>>> shards[0]
a b
0 0 5
1 10 4
>>> shards[1]
a b
2 20 3
3 30 2
>>> shards[2]
a b
4 40 1
>>> list(shard_df_on_index(df, []))[0] # empty case
a b
0 0 5
1 10 4
2 20 3
3 30 2
4 40 1
"""
if isinstance(divisions, Iterator):
divisions = list(divisions)
if not len(divisions):
yield df
else:
divisions = np.array(divisions)
df = df.sort_index()
index = df.index
if iscategorical(index.dtype):
index = index.as_ordered()
indices = index.searchsorted(divisions)
yield df.iloc[:indices[0]]
for i in range(len(indices) - 1):
yield df.iloc[indices[i]: indices[i+1]]
yield df.iloc[indices[-1]:]
def unique(divisions):
""" Polymorphic unique function
>>> list(unique([1, 2, 3, 1, 2, 3]))
[1, 2, 3]
>>> unique(np.array([1, 2, 3, 1, 2, 3]))
array([1, 2, 3])
>>> unique(pd.Categorical(['Alice', 'Bob', 'Alice'], ordered=False))
[Alice, Bob]
Categories (2, object): [Alice, Bob]
"""
if isinstance(divisions, np.ndarray):
return np.unique(divisions)
if isinstance(divisions, pd.Categorical):
return pd.Categorical.from_codes(np.unique(divisions.codes),
divisions.categories, divisions.ordered)
if isinstance(divisions, (tuple, list, Iterator)):
return tuple(toolz.unique(divisions))
raise NotImplementedError()
def _categorize(categories, df):
""" Categorize columns in dataframe
>>> df = pd.DataFrame({'x': [1, 2, 3], 'y': [0, 2, 0]})
>>> categories = {'y': ['A', 'B', 'c']}
>>> _categorize(categories, df)
x y
0 1 A
1 2 c
2 3 A
>>> _categorize(categories, df.y)
0 A
1 c
2 A
dtype: category
Categories (3, object): [A, B, c]
"""
if '.index' in categories:
index = pd.CategoricalIndex(
pd.Categorical.from_codes(df.index.values, categories['.index']))
else:
index = df.index
if isinstance(df, pd.Series):
if df.name in categories:
cat = pd.Categorical.from_codes(df.values, categories[df.name])
return pd.Series(cat, index=index)
else:
return df
else:
return pd.DataFrame(
dict((col, pd.Categorical.from_codes(df[col].values, categories[col])
if col in categories
else df[col].values)
for col in df.columns),
columns=df.columns,
index=index)
def strip_categories(df):
""" Strip categories from dataframe
>>> df = pd.DataFrame({'x': [1, 2, 3], 'y': ['A', 'B', 'A']})
>>> df['y'] = df.y.astype('category')
>>> strip_categories(df)
x y
0 1 0
1 2 1
2 3 0
"""
return pd.DataFrame(dict((col, df[col].cat.codes.values
if iscategorical(df.dtypes[col])
else df[col].values)
for col in df.columns),
columns=df.columns,
index=df.index.codes
if iscategorical(df.index.dtype)
else df.index)
def iscategorical(dt):
return isinstance(dt, pd.core.common.CategoricalDtype)
def get_categories(df):
"""
Get Categories of dataframe
>>> df = pd.DataFrame({'x': [1, 2, 3], 'y': ['A', 'B', 'A']})
>>> df['y'] = df.y.astype('category')
>>> get_categories(df)
{'y': Index([u'A', u'B'], dtype='object')}
"""
result = dict((col, df[col].cat.categories) for col in df.columns
if iscategorical(df.dtypes[col]))
if iscategorical(df.index.dtype):
result['.index'] = df.index.categories
return result
| {
"repo_name": "pombredanne/dask",
"path": "dask/dataframe/utils.py",
"copies": "1",
"size": "4423",
"license": "bsd-3-clause",
"hash": -6940106720555677000,
"line_mean": 25.8060606061,
"line_max": 85,
"alpha_frac": 0.5145828623,
"autogenerated": false,
"ratio": 3.5019794140934284,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9509346351829248,
"avg_score": 0.001443184912836207,
"num_lines": 165
} |
from __future__ import absolute_import, division, print_function
from collections import Mapping
from contextlib import contextmanager
import pandas as pd
from . import formatting, indexing
from .merge import (
expand_and_merge_variables, merge_coords, merge_coords_for_inplace_math)
from .pycompat import OrderedDict
from .utils import Frozen, ReprObject
from .variable import Variable
# Used as the key corresponding to a DataArray's variable when converting
# arbitrary DataArray objects to datasets
_THIS_ARRAY = ReprObject('<this-array>')
class AbstractCoordinates(Mapping, formatting.ReprMixin):
def __getitem__(self, key):
raise NotImplementedError
def __setitem__(self, key, value):
self.update({key: value})
@property
def indexes(self):
return self._data.indexes
@property
def variables(self):
raise NotImplementedError
def _update_coords(self, coords):
raise NotImplementedError
def __iter__(self):
# needs to be in the same order as the dataset variables
for k in self.variables:
if k in self._names:
yield k
def __len__(self):
return len(self._names)
def __contains__(self, key):
return key in self._names
def __unicode__(self):
return formatting.coords_repr(self)
@property
def dims(self):
return self._data.dims
def to_index(self, ordered_dims=None):
"""Convert all index coordinates into a :py:class:`pandas.Index`.
Parameters
----------
ordered_dims : sequence, optional
Possibly reordered version of this object's dimensions indicating
the order in which dimensions should appear on the result.
Returns
-------
pandas.Index
Index subclass corresponding to the outer-product of all dimension
            coordinates. This will be a MultiIndex if this object has more
            than one dimension.
"""
if ordered_dims is None:
ordered_dims = self.dims
elif set(ordered_dims) != set(self.dims):
raise ValueError('ordered_dims must match dims, but does not: '
'{} vs {}'.format(ordered_dims, self.dims))
if len(ordered_dims) == 0:
raise ValueError('no valid index for a 0-dimensional object')
elif len(ordered_dims) == 1:
(dim,) = ordered_dims
return self._data.get_index(dim)
else:
indexes = [self._data.get_index(k) for k in ordered_dims]
names = list(ordered_dims)
return pd.MultiIndex.from_product(indexes, names=names)
def update(self, other):
other_vars = getattr(other, 'variables', other)
coords = merge_coords([self.variables, other_vars],
priority_arg=1, indexes=self.indexes)
self._update_coords(coords)
def _merge_raw(self, other):
"""For use with binary arithmetic."""
if other is None:
variables = OrderedDict(self.variables)
else:
# don't align because we already called xarray.align
variables = expand_and_merge_variables(
[self.variables, other.variables])
return variables
@contextmanager
def _merge_inplace(self, other):
"""For use with in-place binary arithmetic."""
if other is None:
yield
else:
# don't include indexes in priority_vars, because we didn't align
# first
priority_vars = OrderedDict(
kv for kv in self.variables.items() if kv[0] not in self.dims)
variables = merge_coords_for_inplace_math(
[self.variables, other.variables], priority_vars=priority_vars)
yield
self._update_coords(variables)
def merge(self, other):
"""Merge two sets of coordinates to create a new Dataset
The method implements the logic used for joining coordinates in the
result of a binary operation performed on xarray objects:
- If two index coordinates conflict (are not equal), an exception is
raised. You must align your data before passing it to this method.
- If an index coordinate and a non-index coordinate conflict, the non-
index coordinate is dropped.
- If two non-index coordinates conflict, both are dropped.
Parameters
----------
other : DatasetCoordinates or DataArrayCoordinates
The coordinates from another dataset or data array.
Returns
-------
merged : Dataset
A new Dataset with merged coordinates.
"""
from .dataset import Dataset
if other is None:
return self.to_dataset()
else:
other_vars = getattr(other, 'variables', other)
coords = expand_and_merge_variables([self.variables, other_vars])
return Dataset._from_vars_and_coord_names(coords, set(coords))
class DatasetCoordinates(AbstractCoordinates):
"""Dictionary like container for Dataset coordinates.
Essentially an immutable OrderedDict with keys given by the array's
dimensions and the values given by the corresponding xarray.Coordinate
objects.
"""
def __init__(self, dataset):
self._data = dataset
@property
def _names(self):
return self._data._coord_names
@property
def variables(self):
return Frozen(OrderedDict((k, v)
for k, v in self._data.variables.items()
if k in self._names))
def __getitem__(self, key):
if key in self._data.data_vars:
raise KeyError(key)
return self._data[key]
def to_dataset(self):
"""Convert these coordinates into a new Dataset
"""
return self._data._copy_listed(self._names)
def _update_coords(self, coords):
from .dataset import calculate_dimensions
variables = self._data._variables.copy()
variables.update(coords)
# check for inconsistent state *before* modifying anything in-place
dims = calculate_dimensions(variables)
new_coord_names = set(coords)
for dim, size in dims.items():
if dim in variables:
new_coord_names.add(dim)
self._data._variables = variables
self._data._coord_names.update(new_coord_names)
self._data._dims = dict(dims)
def __delitem__(self, key):
if key in self:
del self._data[key]
else:
raise KeyError(key)
def _ipython_key_completions_(self):
"""Provide method for the key-autocompletions in IPython. """
return [key for key in self._data._ipython_key_completions_()
if key not in self._data.data_vars]
class DataArrayCoordinates(AbstractCoordinates):
"""Dictionary like container for DataArray coordinates.
Essentially an OrderedDict with keys given by the array's
dimensions and the values given by corresponding DataArray objects.
"""
def __init__(self, dataarray):
self._data = dataarray
@property
def _names(self):
return set(self._data._coords)
def __getitem__(self, key):
return self._data._getitem_coord(key)
def _update_coords(self, coords):
from .dataset import calculate_dimensions
coords_plus_data = coords.copy()
coords_plus_data[_THIS_ARRAY] = self._data.variable
dims = calculate_dimensions(coords_plus_data)
if not set(dims) <= set(self.dims):
raise ValueError('cannot add coordinates with new dimensions to '
'a DataArray')
self._data._coords = coords
@property
def variables(self):
return Frozen(self._data._coords)
def _to_dataset(self, shallow_copy=True):
from .dataset import Dataset
coords = OrderedDict((k, v.copy(deep=False) if shallow_copy else v)
for k, v in self._data._coords.items())
return Dataset._from_vars_and_coord_names(coords, set(coords))
def to_dataset(self):
return self._to_dataset()
def __delitem__(self, key):
del self._data._coords[key]
def _ipython_key_completions_(self):
"""Provide method for the key-autocompletions in IPython. """
return self._data._ipython_key_completions_()
class LevelCoordinatesSource(object):
"""Iterator for MultiIndex level coordinates.
Used for attribute style lookup with AttrAccessMixin. Not returned directly
by any public methods.
"""
def __init__(self, data_object):
self._data = data_object
def __getitem__(self, key):
# not necessary -- everything here can already be found in coords.
raise KeyError
def __iter__(self):
return iter(self._data._level_coords)
class Indexes(Mapping, formatting.ReprMixin):
"""Ordered Mapping[str, pandas.Index] for xarray objects.
"""
def __init__(self, variables, sizes):
"""Not for public consumption.
Parameters
----------
variables : OrderedDict[Any, Variable]
Reference to OrderedDict holding variable objects. Should be the
same dictionary used by the source object.
sizes : OrderedDict[Any, int]
Map from dimension names to sizes.
"""
self._variables = variables
self._sizes = sizes
def __iter__(self):
for key in self._sizes:
if key in self._variables:
yield key
def __len__(self):
return sum(key in self._variables for key in self._sizes)
def __contains__(self, key):
return key in self._sizes and key in self._variables
def __getitem__(self, key):
if key not in self._sizes:
raise KeyError(key)
return self._variables[key].to_index()
def __unicode__(self):
return formatting.indexes_repr(self)
def assert_coordinate_consistent(obj, coords):
""" Maeke sure the dimension coordinate of obj is
consistent with coords.
obj: DataArray or Dataset
coords: Dict-like of variables
"""
for k in obj.dims:
# make sure there are no conflict in dimension coordinates
if k in coords and k in obj.coords:
if not coords[k].equals(obj[k].variable):
raise IndexError(
'dimension coordinate {!r} conflicts between '
'indexed and indexing objects:\n{}\nvs.\n{}'
.format(k, obj[k], coords[k]))
def remap_label_indexers(obj, method=None, tolerance=None, **indexers):
"""
Remap **indexers from obj.coords.
    If an indexer is an instance of DataArray and it has coordinates, these
    coordinates will be attached to pos_indexers.
    Returns
    -------
    pos_indexers: Same type as indexers.
        np.ndarray or Variable or DataArray
    new_indexes: mapping of new dimensional coordinates.
"""
from .dataarray import DataArray
v_indexers = {k: v.variable.data if isinstance(v, DataArray) else v
for k, v in indexers.items()}
pos_indexers, new_indexes = indexing.remap_label_indexers(
obj, v_indexers, method=method, tolerance=tolerance
)
# attach indexer's coordinate to pos_indexers
for k, v in indexers.items():
if isinstance(v, Variable):
pos_indexers[k] = Variable(v.dims, pos_indexers[k])
elif isinstance(v, DataArray):
# drop coordinates found in indexers since .sel() already
# ensures alignments
coords = OrderedDict((k, v) for k, v in v._coords.items()
if k not in indexers)
pos_indexers[k] = DataArray(pos_indexers[k],
coords=coords, dims=v.dims)
return pos_indexers, new_indexes
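# Illustrative sketch (not part of the original module): the MultiIndex
# construction that ``AbstractCoordinates.to_index`` performs for an object
# with more than one dimension.  The index values and names below are made
# up purely for demonstration.
def _example_to_index_product():
    indexes = [pd.Index([1, 2]), pd.Index(['a', 'b'])]
    names = ['x', 'y']
    # outer product of the dimension coordinates, as in to_index()
    return pd.MultiIndex.from_product(indexes, names=names)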
| {
"repo_name": "jcmgray/xarray",
"path": "xarray/core/coordinates.py",
"copies": "1",
"size": "12070",
"license": "apache-2.0",
"hash": 6314638935549490000,
"line_mean": 31.9781420765,
"line_max": 79,
"alpha_frac": 0.6092792046,
"autogenerated": false,
"ratio": 4.48199034533977,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 366
} |
from __future__ import absolute_import, division, print_function
from collections import Mapping
from keyword import iskeyword
import re
import datashape
from datashape import (
dshape,
DataShape,
Record,
Var,
Fixed,
promote,
Option,
Null,
)
from datashape.predicates import (
isscalar,
iscollection,
isboolean,
isrecord,
istabular,
)
import numpy as np
from odo.utils import copydoc
import toolz
from toolz import concat, memoize, partial, first, unique, merge
from toolz.curried import map, filter
from ..compatibility import _strtypes, builtins, boundmethod, PY2
from .core import (
Node,
_setattr,
common_subexpression,
path,
resolve_args,
subs,
)
from .method_dispatch import select_functions
from ..dispatch import dispatch
from .utils import hashable_index, replace_slices, maxshape
from ..utils import attribute, as_attribute
__all__ = [
'Apply',
'Cast',
'Coalesce',
'Coerce',
'ElemWise',
'Expr',
'Field',
'Label',
'Map',
'Projection',
'ReLabel',
'Selection',
'SimpleSelection',
'Slice',
'Symbol',
'apply',
'cast',
'coalesce',
'coerce',
'discover',
'drop_field',
'label',
'ndim',
'projection',
'relabel',
'selection',
'shape',
'symbol',
]
def isvalid_identifier(s):
"""Check whether a string is a valid Python identifier
Examples
--------
>>> isvalid_identifier('Hello')
True
>>> isvalid_identifier('Hello world')
False
>>> isvalid_identifier('Helloworld!')
False
>>> isvalid_identifier('1a')
False
>>> isvalid_identifier('a1')
True
>>> isvalid_identifier('for')
False
>>> isvalid_identifier(None)
False
"""
# the re module compiles and caches regexs so no need to compile it
return (s is not None and not iskeyword(s) and
re.match(r'^[_a-zA-Z][_a-zA-Z0-9]*$', s) is not None)
def valid_identifier(s):
"""Rewrite a string to be a valid identifier if it contains
>>> valid_identifier('hello')
'hello'
>>> valid_identifier('hello world')
'hello_world'
>>> valid_identifier('hello.world')
'hello_world'
>>> valid_identifier('hello-world')
'hello_world'
>>> valid_identifier(None)
>>> valid_identifier('1a')
"""
if isinstance(s, _strtypes):
if not s or s[0].isdigit():
return
return s.replace(' ', '_').replace('.', '_').replace('-', '_')
return s
class Expr(Node):
"""
Symbolic expression of a computation
All Blaze expressions (Join, By, Sort, ...) descend from this class. It
contains shared logic and syntax. It in turn inherits from ``Node`` which
holds all tree traversal logic
"""
def __repr__(self):
return str(self)
def _get_field(self, fieldname):
if not isinstance(self.dshape.measure, (Record, datashape.Map)):
if fieldname == self._name:
return self
raise ValueError(
"Can not get field '%s' of non-record expression %s" %
(fieldname, self))
return Field(self, fieldname)
def __getitem__(self, key):
if isinstance(key, _strtypes) and key in self.fields:
return self._get_field(key)
elif isinstance(key, Expr) and iscollection(key.dshape):
return self._select(key)
elif (isinstance(key, list) and
builtins.all(isinstance(k, _strtypes) for k in key)):
if set(key).issubset(self.fields):
return self._project(key)
else:
raise ValueError('Names %s not consistent with known names %s'
% (key, self.fields))
elif (isinstance(key, tuple) and
all(isinstance(k, (int, slice, type(None), list, np.ndarray))
for k in key)):
return sliceit(self, key)
elif isinstance(key, (slice, int, type(None), list, np.ndarray)):
return sliceit(self, (key,))
raise ValueError("Not understood %s[%s]" % (self, key))
def map(self, func, schema=None, name=None):
return Map(self, func, schema, name)
@attribute
def schema(self):
try:
m = self._schema
except AttributeError:
schema = datashape.dshape(self.dshape.measure)
else:
schema = m()
return _setattr(self, 'schema', schema)
@attribute
def dshape(self):
return _setattr(self, 'dshape', self._dshape())
@property
def fields(self):
measure = self.dshape.measure
if isinstance(self.dshape.measure, Option):
measure = measure.ty
if isinstance(measure, Record):
return measure.names
elif isinstance(measure, datashape.Map):
if not isrecord(self.dshape.measure.value):
raise TypeError('Foreign key must reference a '
'Record datashape')
return measure.value.names
name = getattr(self, '_name', None)
if name is not None:
return [self._name]
return []
def _len(self):
try:
return int(self.dshape[0])
except TypeError:
raise ValueError('Can not determine length of table with the '
'following datashape: %s' % self.dshape)
def __len__(self): # pragma: no cover
return self._len()
def __iter__(self):
raise NotImplementedError(
'Iteration over expressions is not supported.\n'
'Iterate over computed result instead, e.g. \n'
"\titer(expr) # don't do this\n"
"\titer(compute(expr)) # do this instead")
def __dir__(self):
result = dir(type(self))
if (isrecord(self.dshape.measure) or
isinstance(self.dshape.measure, datashape.Map) and
self.fields):
result.extend(map(valid_identifier, self.fields))
result.extend(toolz.merge(schema_methods(self.dshape.measure),
dshape_methods(self.dshape)))
return sorted(set(filter(isvalid_identifier, result)))
def __getattr__(self, key):
assert key != '_hash', \
'%s should set _hash in _init' % type(self).__name__
try:
result = object.__getattribute__(self, key)
except AttributeError:
fields = dict(zip(map(valid_identifier, self.fields), self.fields))
measure = self.dshape.measure
if isinstance(measure, datashape.Map): # Foreign key
measure = measure.key
# prefer the method if there's a field with the same name
methods = toolz.merge(
schema_methods(measure),
dshape_methods(self.dshape)
)
if key in methods:
func = methods[key]
if func in method_properties:
result = func(self)
elif getattr(func, '__get__', None):
result = func.__get__(self, type(self))
else:
result = boundmethod(func, self)
elif self.fields and key in fields:
if isscalar(self.dshape.measure): # t.foo.foo is t.foo
result = self
else:
result = self[fields[key]]
else:
raise
# cache the attribute lookup, getattr will not be invoked again.
_setattr(self, key, result)
return result
@attribute
def _name(self):
measure = self.dshape.measure
if len(self._inputs) == 1 and isscalar(getattr(measure, 'key',
measure)):
child_measure = self._child.dshape.measure
if isscalar(getattr(child_measure, 'key', child_measure)):
# memoize the result
return _setattr(self, '_name', self._child._name)
def __enter__(self):
""" Enter context """
return self
def __exit__(self, *args):
""" Exit context
Close any open resource if we are called in context
"""
for value in self._resources().values():
try:
value.close()
except AttributeError:
pass
return True
# Add some placeholders to help with refactoring. If we forget to attach
# these methods later we will get better errors.
# To find the real definition, look for usage of ``@as_attribute``
for method in ('_project', '_select', 'cast'):
@attribute
def _(self):
raise AssertionError('method added after class definition')
locals()[method] = _
del _
del method
def sanitized_dshape(dshape, width=50):
pretty_dshape = datashape.pprint(dshape, width=width).replace('\n', '')
if len(pretty_dshape) > width:
pretty_dshape = "{}...".format(pretty_dshape[:width])
return pretty_dshape
class Symbol(Expr):
"""
Symbolic data. The leaf of a Blaze expression
Examples
--------
>>> points = symbol('points', '5 * 3 * {x: int, y: int}')
>>> points
<`points` symbol; dshape='5 * 3 * {x: int32, y: int32}'>
>>> points.dshape
dshape("5 * 3 * {x: int32, y: int32}")
"""
_arguments = '_name', 'dshape', '_token'
_input_attributes = ()
def __repr__(self):
fmt = "<`{}` symbol; dshape='{}'>"
return fmt.format(self._name, sanitized_dshape(self.dshape))
def __str__(self):
return self._name or ''
def _resources(self):
return {}
@copydoc(Symbol)
def symbol(name, dshape, token=None):
return Symbol(name, datashape.dshape(dshape), token or 0)
@dispatch(Symbol, Mapping)
def _subs(o, d):
""" Subs symbols using symbol function
Supports caching"""
newargs = (subs(arg, d) for arg in o._args)
return symbol(*newargs)
class ElemWise(Expr):
"""
Elementwise operation.
The shape of this expression matches the shape of the child.
"""
def _dshape(self):
return datashape.DataShape(
*(self._child.dshape.shape + tuple(self.schema))
)
class Field(ElemWise):
"""
A single field from an expression.
Get a single field from an expression with record-type schema.
We store the name of the field in the ``_name`` attribute.
Examples
--------
>>> points = symbol('points', '5 * 3 * {x: int32, y: int32}')
>>> points.x.dshape
dshape("5 * 3 * int32")
For fields that aren't valid Python identifiers, use ``[]`` syntax:
>>> points = symbol('points', '5 * 3 * {"space station": float64}')
>>> points['space station'].dshape
dshape("5 * 3 * float64")
"""
_arguments = '_child', '_name'
def __str__(self):
fmt = '%s.%s' if isvalid_identifier(self._name) else '%s[%r]'
return fmt % (self._child, self._name)
@property
def _expr(self):
return symbol(self._name, datashape.DataShape(self.dshape.measure))
def _dshape(self):
shape = self._child.dshape.shape
measure = self._child.dshape.measure
# TODO: is this too special-case-y?
schema = getattr(measure, 'value', measure).dict[self._name]
shape = shape + schema.shape
schema = (schema.measure,)
return DataShape(*(shape + schema))
class Projection(ElemWise):
"""Select a subset of fields from data.
Examples
--------
>>> accounts = symbol('accounts',
... 'var * {name: string, amount: int, id: int}')
>>> accounts[['name', 'amount']].schema
dshape("{name: string, amount: int32}")
>>> accounts[['name', 'amount']]
accounts[['name', 'amount']]
See Also
--------
blaze.expr.expressions.Field
"""
_arguments = '_child', '_fields'
@property
def fields(self):
return list(self._fields)
def _schema(self):
measure = self._child.schema.measure
d = getattr(measure, 'value', measure).dict
return DataShape(Record((name, d[name]) for name in self.fields))
def __str__(self):
return '%s[%s]' % (self._child, self.fields)
def _project(self, key):
if isinstance(key, list) and set(key).issubset(set(self.fields)):
return self._child[key]
raise ValueError("Column Mismatch: %s" % key)
def _get_field(self, fieldname):
if fieldname in self.fields:
return Field(self._child, fieldname)
raise ValueError("Field %s not found in columns %s" % (fieldname,
self.fields))
@as_attribute(Expr, '_project')
@copydoc(Projection)
def projection(expr, names):
if not names:
raise ValueError("Projection with no names")
if not isinstance(names, (tuple, list)):
raise TypeError("Wanted list of strings, got %s" % names)
if not set(names).issubset(expr.fields):
raise ValueError("Mismatched names. Asking for names %s "
"where expression has names %s" %
(names, expr.fields))
return Projection(expr, tuple(names))
def sanitize_index_lists(ind):
""" Handle lists/arrays of integers/bools as indexes
>>> sanitize_index_lists([2, 3, 5])
[2, 3, 5]
>>> sanitize_index_lists([True, False, True, False])
[0, 2]
>>> sanitize_index_lists(np.array([1, 2, 3]))
[1, 2, 3]
>>> sanitize_index_lists(np.array([False, True, True]))
[1, 2]
"""
if not isinstance(ind, (list, np.ndarray)):
return ind
if isinstance(ind, np.ndarray):
ind = ind.tolist()
if isinstance(ind, list) and ind and isinstance(ind[0], bool):
ind = [a for a, b in enumerate(ind) if b]
return ind
def sliceit(child, index):
index2 = tuple(map(sanitize_index_lists, index))
index3 = hashable_index(index2)
s = Slice(child, index3)
hash(s)
return s
class Slice(Expr):
"""Elements `start` until `stop`. On many backends, a `step` parameter
is also allowed.
Examples
--------
>>> from blaze import symbol
>>> accounts = symbol('accounts', 'var * {name: string, amount: int}')
>>> accounts[2:7].dshape
dshape("5 * {name: string, amount: int32}")
>>> accounts[2:7:2].dshape
dshape("3 * {name: string, amount: int32}")
"""
_arguments = '_child', '_index'
def _dshape(self):
return self._child.dshape.subshape[self.index]
@property
def index(self):
return replace_slices(self._index)
def __str__(self):
if isinstance(self.index, tuple):
index = ', '.join(map(str, self._index))
else:
index = str(self._index)
return '%s[%s]' % (self._child, index)
class Selection(Expr):
""" Filter elements of expression based on predicate
Examples
--------
>>> accounts = symbol('accounts',
... 'var * {name: string, amount: int, id: int}')
>>> deadbeats = accounts[accounts.amount < 0]
"""
_arguments = '_child', 'predicate'
_input_attributes = '_child', 'predicate'
@property
def _name(self):
return self._child._name
def __str__(self):
return "%s[%s]" % (self._child, self.predicate)
def _dshape(self):
shape = list(self._child.dshape.shape)
shape[0] = Var()
return DataShape(*(shape + [self._child.dshape.measure]))
class SimpleSelection(Selection):
"""Internal selection class that does not treat the predicate as an input.
"""
_arguments = Selection._arguments
_input_attributes = '_child',
@as_attribute(Expr, '_select')
@copydoc(Selection)
def selection(table, predicate):
subexpr = common_subexpression(table, predicate)
if not builtins.all(
isinstance(node, (VarArgsExpr, ElemWise, Symbol)) or
node.isidentical(subexpr)
for node in concat([path(predicate, subexpr),
path(table, subexpr)])):
raise ValueError("Selection not properly matched with table:\n"
"child: %s\n"
"apply: %s\n"
"predicate: %s" % (subexpr, table, predicate))
if not isboolean(predicate.dshape):
raise TypeError("Must select over a boolean predicate. Got:\n"
"%s[%s]" % (table, predicate))
return table._subs({subexpr: Selection(subexpr, predicate)})
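# Illustrative sketch (not part of the original module): selection() accepts
# boolean predicates that share a subexpression with the table and raises
# TypeError otherwise.  ``_example_selection_type_check`` is a hypothetical
# helper added only for demonstration; it assumes the full blaze.expr package
# is imported so that comparison operators are attached to Expr.
def _example_selection_type_check():
    accounts = symbol('accounts', 'var * {name: string, amount: int}')
    deadbeats = accounts[accounts.amount < 0]   # boolean predicate: accepted
    try:
        accounts[accounts.amount]               # non-boolean predicate
    except TypeError:
        pass                                    # "Must select over a boolean predicate"
    return deadbeats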
class Label(ElemWise):
"""An expression with a name.
Examples
--------
>>> accounts = symbol('accounts', 'var * {name: string, amount: int}')
>>> expr = accounts.amount * 100
>>> expr._name
'amount'
>>> expr.label('new_amount')._name
'new_amount'
See Also
--------
blaze.expr.expressions.ReLabel
"""
_arguments = '_child', 'label'
def _schema(self):
return self._child.schema
@property
def _name(self):
return self.label
def _get_field(self, key):
if key[0] == self.fields[0]:
return self
raise ValueError("Column Mismatch: %s" % key)
def __str__(self):
return 'label(%s, %r)' % (self._child, self.label)
@copydoc(Label)
def label(expr, lab):
if expr._name == lab:
return expr
return Label(expr, lab)
class ReLabel(ElemWise):
"""
    Table with the same content but with new labels.
Examples
--------
>>> accounts = symbol('accounts', 'var * {name: string, amount: int}')
>>> accounts.schema
dshape("{name: string, amount: int32}")
>>> accounts.relabel(amount='balance').schema
dshape("{name: string, balance: int32}")
>>> accounts.relabel(not_a_column='definitely_not_a_column')
Traceback (most recent call last):
...
ValueError: Cannot relabel non-existent child fields: {'not_a_column'}
>>> s = symbol('s', 'var * {"0": int64}')
>>> s.relabel({'0': 'foo'})
s.relabel({'0': 'foo'})
>>> s.relabel(0='foo') # doctest: +SKIP
Traceback (most recent call last):
...
SyntaxError: keyword can't be an expression
Notes
-----
    When names are not valid Python identifiers, such as integers or strings
    with spaces, you must pass a dictionary to ``relabel``. For example
.. code-block:: python
>>> s = symbol('s', 'var * {"0": int64}')
>>> s.relabel({'0': 'foo'})
s.relabel({'0': 'foo'})
>>> t = symbol('t', 'var * {"whoo hoo": ?float32}')
>>> t.relabel({"whoo hoo": 'foo'})
t.relabel({'whoo hoo': 'foo'})
See Also
--------
blaze.expr.expressions.Label
"""
_arguments = '_child', 'labels'
def _schema(self):
subs = dict(self.labels)
param = self._child.dshape.measure.parameters[0]
return DataShape(Record([[subs.get(name, name), dtype]
for name, dtype in param]))
def __str__(self):
labels = self.labels
if all(map(isvalid_identifier, map(first, labels))):
rest = ', '.join('%s=%r' % l for l in labels)
else:
rest = '{%s}' % ', '.join('%r: %r' % l for l in labels)
return '%s.relabel(%s)' % (self._child, rest)
@copydoc(ReLabel)
def relabel(child, labels=None, **kwargs):
labels = {k: v
for k, v in toolz.merge(labels or {}, kwargs).items() if k != v}
label_keys = set(labels)
fields = child.fields
if not label_keys.issubset(fields):
non_existent_fields = label_keys.difference(fields)
raise ValueError("Cannot relabel non-existent child fields: {%s}" %
', '.join(map(repr, non_existent_fields)))
if not labels:
return child
if isinstance(labels, Mapping): # Turn dict into tuples
labels = tuple(sorted(labels.items()))
if isscalar(child.dshape.measure):
if child._name == labels[0][0]:
return child.label(labels[0][1])
else:
return child
return ReLabel(child, labels)
class Map(ElemWise):
""" Map an arbitrary Python function across elements in a collection
Examples
--------
>>> from datetime import datetime
>>> t = symbol('t', 'var * {price: real, time: int64}') # times as integers
>>> datetimes = t.time.map(datetime.utcfromtimestamp)
Optionally provide extra schema information
>>> datetimes = t.time.map(datetime.utcfromtimestamp,
... schema='{time: datetime}')
See Also
--------
    blaze.expr.expressions.Apply
"""
_arguments = '_child', 'func', '_asschema', '_name0'
def _schema(self):
if self._asschema:
return dshape(self._asschema)
else:
raise NotImplementedError("Schema of mapped column not known.\n"
"Please specify datashape keyword in "
".map method.\nExample: "
"t.columnname.map(function, 'int64')")
def label(self, name):
assert isscalar(self.dshape.measure)
return Map(self._child,
self.func,
self.schema,
name)
@property
def shape(self):
return self._child.shape
@property
def ndim(self):
return self._child.ndim
@property
def _name(self):
if self._name0:
return self._name0
else:
return self._child._name
if PY2:
copydoc(Map, Expr.map.im_func)
else:
copydoc(Map, Expr.map)
class Apply(Expr):
""" Apply an arbitrary Python function onto an expression
Examples
--------
>>> t = symbol('t', 'var * {name: string, amount: int}')
>>> h = t.apply(hash, dshape='int64') # Hash value of resultant dataset
You must provide the datashape of the result with the ``dshape=`` keyword.
For datashape examples see
http://datashape.pydata.org/grammar.html#some-simple-examples
If using a chunking backend and your operation may be safely split and
concatenated then add the ``splittable=True`` keyword argument
>>> t.apply(f, dshape='...', splittable=True) # doctest: +SKIP
See Also
--------
blaze.expr.expressions.Map
"""
_arguments = '_child', 'func', '_asdshape', '_splittable'
def _schema(self):
if iscollection(self.dshape):
return self.dshape.subshape[0]
else:
raise TypeError("Non-tabular datashape, %s" % self.dshape)
def _dshape(self):
return self._asdshape
@copydoc(Apply)
def apply(expr, func, dshape, splittable=False):
return Apply(expr, func, datashape.dshape(dshape), splittable)
class Coerce(ElemWise):
"""Coerce an expression to a different type.
Examples
--------
>>> t = symbol('t', '100 * float64')
>>> t.coerce(to='int64')
t.coerce(to='int64')
>>> t.coerce('float32')
t.coerce(to='float32')
>>> t.coerce('int8').dshape
dshape("100 * int8")
"""
_arguments = '_child', 'to'
def _schema(self):
return self.to
def __str__(self):
return '%s.coerce(to=%r)' % (self._child, str(self.schema))
@copydoc(Coerce)
def coerce(expr, to):
return Coerce(expr, dshape(to) if isinstance(to, _strtypes) else to)
class Cast(Expr):
"""Cast an expression to a different type.
This is only an expression time operation.
Examples
--------
>>> s = symbol('s', '?int64')
>>> s.cast('?int32').dshape
dshape("?int32")
# Cast to correct mislabeled optionals
>>> s.cast('int64').dshape
dshape("int64")
# Cast to give concrete dimension length
>>> t = symbol('t', 'var * float32')
>>> t.cast('10 * float32').dshape
dshape("10 * float32")
"""
_arguments = '_child', 'to'
def _dshape(self):
return self.to
def __str__(self):
return 'cast(%s, to=%r)' % (self._child, str(self.to))
@as_attribute(Expr)
@copydoc(Cast)
def cast(expr, to):
return Cast(expr, dshape(to) if isinstance(to, _strtypes) else to)
def binop_name(expr):
if not isscalar(expr.dshape.measure):
return None
l = getattr(expr.lhs, '_name', None)
r = getattr(expr.rhs, '_name', None)
if bool(l) ^ bool(r):
return l or r
elif l == r:
return l
return None
def binop_inputs(expr):
if isinstance(expr.lhs, Expr):
yield expr.lhs
if isinstance(expr.rhs, Expr):
yield expr.rhs
class Coalesce(Expr):
"""SQL like coalesce.
.. code-block:: python
coalesce(a, b) = {
a if a is not NULL
b otherwise
}
Examples
--------
>>> coalesce(1, 2)
1
>>> coalesce(1, None)
1
>>> coalesce(None, 2)
2
>>> coalesce(None, None) is None
True
"""
_arguments = 'lhs', 'rhs', 'dshape'
_input_attributes = 'lhs', 'rhs'
def __str__(self):
return 'coalesce(%s, %s)' % (self.lhs, self.rhs)
_name = property(binop_name)
@property
def _inputs(self):
return tuple(binop_inputs(self))
@copydoc(Coalesce)
def coalesce(a, b):
a_dshape = discover(a)
a_measure = a_dshape.measure
isoption = isinstance(a_measure, Option)
if isoption:
a_measure = a_measure.ty
isnull = isinstance(a_measure, Null)
if isnull:
# a is always null, this is just b
return b
if not isoption:
# a is not an option, this is just a
return a
b_dshape = discover(b)
return Coalesce(a, b, DataShape(*(
maxshape((a_dshape.shape, b_dshape.shape)) +
(promote(a_measure, b_dshape.measure),)
)))
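# Illustrative sketch (not part of the original module): coalescing an
# optional (nullable) column with a concrete value drops the option from the
# measure via ``promote``.  The symbols and dshapes below are made up for
# demonstration only.
def _example_coalesce_dshape():
    s = symbol('s', 'var * ?int32')
    fill = symbol('fill', 'int32')
    # the resulting measure is no longer optional, e.g. dshape("var * int32")
    return coalesce(s, fill).dshape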
dshape_method_list = list()
schema_method_list = list()
method_properties = set()
dshape_methods = memoize(partial(select_functions, dshape_method_list))
schema_methods = memoize(partial(select_functions, schema_method_list))
@dispatch(DataShape)
def shape(ds):
s = ds.shape
s = tuple(int(d) if isinstance(d, Fixed) else d for d in s)
return s
@dispatch(object)
def shape(expr):
""" Shape of expression
>>> symbol('s', '3 * 5 * int32').shape
(3, 5)
Works on anything discoverable
>>> shape([[1, 2], [3, 4]])
(2, 2)
"""
s = list(discover(expr).shape)
for i, elem in enumerate(s):
try:
s[i] = int(elem)
except TypeError:
pass
return tuple(s)
def ndim(expr):
""" Number of dimensions of expression
>>> symbol('s', '3 * var * int32').ndim
2
"""
return len(shape(expr))
def drop_field(expr, field, *fields):
"""Drop a field or fields from a tabular expression.
Parameters
----------
expr : Expr
A tabular expression to drop columns from.
*fields
The names of the fields to drop.
Returns
-------
dropped : Expr
The new tabular expression with some columns missing.
Raises
------
TypeError
Raised when ``expr`` is not tabular.
ValueError
Raised when a column is not in the fields of ``expr``.
See Also
--------
:func:`blaze.expr.expressions.projection`
"""
to_remove = set((field,)).union(fields)
new_fields = []
for field in expr.fields:
if field not in to_remove:
new_fields.append(field)
else:
to_remove.remove(field)
if to_remove:
raise ValueError(
'fields %r were not in the fields of expr (%r)' % (
sorted(to_remove),
expr.fields
),
)
return expr[new_fields]
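# Illustrative sketch (not part of the original module): dropping a column
# reduces to a projection onto the remaining fields.  The symbol below is
# made up for demonstration only.
def _example_drop_field():
    accounts = symbol('accounts', 'var * {name: string, amount: int, id: int}')
    trimmed = drop_field(accounts, 'id')
    return trimmed.fields       # ['name', 'amount']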
dshape_method_list.extend([
(lambda ds: True, {apply}),
(iscollection, {shape, ndim}),
(lambda ds: iscollection(ds) and isscalar(ds.measure), {coerce}),
(istabular, {drop_field}),
])
schema_method_list.extend([
(isscalar, {label, relabel, coerce}),
(isrecord, {relabel}),
(lambda ds: isinstance(ds, Option), {coalesce}),
])
method_properties.update([shape, ndim])
@dispatch(Expr)
def discover(expr):
return expr.dshape
class VarArgsExpr(Expr):
"""An expression used for collecting variadic arguments into a single, typed
container.
Parameters
----------
_inputs : tuple[any]
The arguments that this expression will compute.
"""
_arguments = '_inputs',
@attribute
def _inputs(self):
raise NotImplementedError('overridden in _init')
def _dshape(self):
return DataShape(datashape.void)
def varargsexpr(args):
"""Create a varargs expr which will be materialzed as a ``VarArgs``
"""
# lazy import to break cycle
from blaze.compute.varargs import register_varargs_arity
args = tuple(args)
register_varargs_arity(len(args))
return VarArgsExpr(args)
| {
"repo_name": "ContinuumIO/blaze",
"path": "blaze/expr/expressions.py",
"copies": "3",
"size": "29015",
"license": "bsd-3-clause",
"hash": 5986935608387100000,
"line_mean": 26.0158286778,
"line_max": 80,
"alpha_frac": 0.5678097536,
"autogenerated": false,
"ratio": 3.8258175105485233,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0001302163808213977,
"num_lines": 1074
} |
from __future__ import absolute_import, division, print_function
from collections import Mapping, OrderedDict
import datetime
from functools import reduce, partial
import inspect
from itertools import repeat
import numbers
from pprint import pformat
from weakref import WeakValueDictionary
import toolz
from toolz import unique, concat, first
from ..compatibility import _strtypes
from ..dispatch import dispatch
from ..utils import ordered_intersect
__all__ = ['Node', 'path', 'common_subexpression', 'eval_str']
base = (numbers.Number,) + _strtypes + (datetime.datetime, datetime.timedelta)
def resolve_args(cls, *args, **kwargs):
"""Resolve the arguments from a node class into an ordereddict.
All arguments are assumed to have a default of None.
This is sort of like getargspec but uses the `Node` specific machinery.
Parameters
----------
cls : subclass of Node
The class to resolve the arguments for.
*args, **kwargs
The arguments that were passed.
Returns
-------
args : OrderedDict
A dictionary mapping argument names to their value in the order
they appear in the `_arguments` tuple.
Examples
--------
>>> class MyNode(Node):
... _arguments = 'a', 'b', 'c'
...
good cases
>>> resolve_args(MyNode, 1, 2, 3)
OrderedDict([('a', 1), ('b', 2), ('c', 3)])
>>> resolve_args(MyNode, 1, 2, c=3)
OrderedDict([('a', 1), ('b', 2), ('c', 3)])
>>> resolve_args(MyNode, a=1, b=2, c=3)
OrderedDict([('a', 1), ('b', 2), ('c', 3)])
error cases
>>> resolve_args(MyNode, 1, 2, 3, a=4)
Traceback (most recent call last):
...
TypeError: MyNode got multiple values for argument 'a'
>>> resolve_args(MyNode, 1, 2, 3, 4)
Traceback (most recent call last):
...
TypeError: MyNode takes 3 positional arguments but 4 were given
>>> resolve_args(MyNode, 1, 2, 3, d=4)
Traceback (most recent call last):
...
TypeError: MyNode got unknown keywords: d
"""
attrs = cls._arguments
attrset = set(attrs)
if not set(kwargs) <= attrset:
raise TypeError(
'%s got unknown keywords: %s' % (
cls.__name__,
', '.join(set(kwargs) - attrset),
),
)
if len(args) > len(attrs):
raise TypeError(
'%s takes %d positional argument%s but %d were given' % (
cls.__name__,
len(attrs),
's' if len(attrs) > 1 else '',
len(args),
),
)
attributes = OrderedDict(zip(attrs, repeat(None)))
to_add = dict(zip(attrs, args))
attributes.update(to_add)
added = set(to_add)
for key, value in kwargs.items():
if key in added:
raise TypeError(
'%s got multiple values for argument %r' % (
cls.__name__,
key,
),
)
attributes[key] = value
added.add(key)
return attributes
def _static_identity(ob):
return type(ob)._static_identity(*ob._args)
def _setattr(ob, name, value):
object.__setattr__(ob, name, value)
return value
class Node(object):
""" Node in a tree
This serves as the base class for ``Expr``. This class holds all of the
tree traversal functions that are independent of tabular or array
computation. This is everything that we can do independent of the problem
domain. Note that datashape is not imported.
See Also
--------
blaze.expr.expressions.Expr
"""
_arguments = '_child',
_input_attributes = '_child',
__expr_instance_cache = WeakValueDictionary()
def __new__(cls, *args, **kwargs):
static_id = cls._static_identity(*args, **kwargs)
try:
return cls.__expr_instance_cache[static_id]
except KeyError:
cls.__expr_instance_cache[static_id] = self = super(
Node,
cls,
).__new__(cls)._init(*args, **kwargs)
return self
def _init(self, *args, **kwargs):
for name, arg in resolve_args(type(self), *args, **kwargs).items():
_setattr(self, name, arg)
_setattr(self, '_hash', None)
return self
def __setattr__(self, name, value):
raise AttributeError('cannot set attributes of immutable objects')
@property
def _args(self):
return tuple(getattr(self, slot) for slot in self._arguments)
@classmethod
def _static_identity(cls, *args, **kwargs):
return (cls,) + tuple(resolve_args(cls, *args, **kwargs).values())
@property
def _inputs(self):
return tuple(getattr(self, i) for i in self._input_attributes)
def _leaves(self):
""" Leaves of an expression tree
All nodes without inputs. Leaves are returned in order, left to right.
>>> from blaze.expr import symbol, join, by
>>> t = symbol('t', 'var * {id: int32, name: string}')
>>> t._leaves()
[<`t` symbol; dshape='var * {id: int32, name: string}'>]
>>> by(t.name, count=t.id.nunique())._leaves()
[<`t` symbol; dshape='var * {id: int32, name: string}'>]
>>> v = symbol('v', 'var * {id: int32, city: string}')
>>> join(t, v)._leaves() == [t, v]
True
"""
if not self._inputs:
return [self]
else:
return list(unique(concat(i._leaves() for i in self._inputs if
isinstance(i, Node))))
def isidentical(self, other):
"""Identity check for blaze expressions.
"""
return self is other
def __hash__(self):
hash_ = self._hash
if hash_ is None:
hash_ = _setattr(
self,
'_hash',
hash((type(self), _static_identity(self))),
)
return hash_
def __str__(self):
rep = [
'%s=%s' % (slot, _str(arg))
for slot, arg in zip(self._arguments, self._args)
]
return '%s(%s)' % (type(self).__name__, ', '.join(rep))
def _traverse(self):
""" Traverse over tree, yielding all subtrees and leaves """
yield self
traversals = (
arg._traverse() if isinstance(arg, Node) else [arg]
for arg in self._inputs
)
for item in concat(traversals):
yield item
def _subs(self, d):
""" Substitute terms in the tree
>>> from blaze.expr import symbol
>>> t = symbol('t', 'var * {name: string, amount: int, id: int}')
>>> expr = t.amount + 3
>>> expr._subs({3: 4, 'amount': 'id'}).isidentical(t.id + 4)
True
"""
return subs(self, d)
def _resources(self):
return toolz.merge(arg._resources() for arg in self._inputs)
def _subterms(self):
return subterms(self)
def __contains__(self, other):
return other in set(self._subterms())
def __reduce_ex__(self, protocol):
if protocol < 2:
raise ValueError(
'blaze expressions may only be pickled with protocol'
' 2 or greater',
)
return type(self), self._args
def __eq__(self, other):
try:
return self.isidentical(other) or self._eq(other)
except AttributeError:
return False
def __ne__(self, other):
return self._ne(other)
def __lt__(self, other):
return self._lt(other)
def __le__(self, other):
return self._le(other)
def __gt__(self, other):
return self._gt(other)
def __ge__(self, other):
return self._ge(other)
def __add__(self, other):
return self._add(other)
def __radd__(self, other):
return self._radd(other)
def __mul__(self, other):
return self._mul(other)
def __rmul__(self, other):
return self._rmul(other)
def __div__(self, other):
return self._div(other)
def __rdiv__(self, other):
return self._rdiv(other)
__truediv__ = __div__
__rtruediv__ = __rdiv__
def __floordiv__(self, other):
return self._floordiv(other)
def __rfloordiv__(self, other):
return self._rfloordiv(other)
def __sub__(self, other):
return self._sub(other)
def __rsub__(self, other):
return self._rsub(other)
def __pow__(self, other):
return self._pow(other)
def __rpow__(self, other):
return self._rpow(other)
def __mod__(self, other):
return self._mod(other)
def __rmod__(self, other):
return self._rmod(other)
def __or__(self, other):
return self._or(other)
def __ror__(self, other):
return self._ror(other)
def __and__(self, other):
return self._and(other)
def __rand__(self, other):
return self._rand(other)
def __neg__(self):
return self._neg()
def __invert__(self):
return self._invert()
def __abs__(self):
from .math import abs
return abs(self)
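# Illustrative sketch (not part of the original module): ``Node`` caches
# instances by their static identity, so constructing the same expression
# twice yields the very same object.  ``_Pair`` is a hypothetical subclass
# defined only for demonstration.
def _example_node_instance_cache():
    class _Pair(Node):
        _arguments = 'a', 'b'
        _input_attributes = ()

    first_pair = _Pair(1, 2)
    assert first_pair is _Pair(1, 2)             # same cached instance
    assert first_pair.isidentical(_Pair(1, 2))   # identity check succeeds
    return first_pair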
def get_callable_name(o):
"""Welcome to str inception. Leave your kittens at home.
"""
# special case partial objects
if isinstance(o, partial):
keywords = o.keywords
kwds = (
', '.join('%s=%r' % item for item in keywords.items())
if keywords else
''
)
args = ', '.join(map(repr, o.args))
arguments = []
if args:
arguments.append(args)
if kwds:
arguments.append(kwds)
return 'partial(%s, %s)' % (
get_callable_name(o.func),
', '.join(arguments),
)
try:
# python 3 makes builtins look nice
return o.__qualname__
except AttributeError:
try:
# show the module of the object, if we can
return '%s.%s' % (inspect.getmodule(o).__name__, o.__name__)
except AttributeError:
try:
# __self__ tells us the class the method is bound to
return '%s.%s' % (o.__self__.__name__, o.__name__)
except AttributeError:
# exhausted all avenues of printing callables so just print the
# name of the object
return o.__name__
def _str(s):
""" Wrap single quotes around strings """
if isinstance(s, str):
return repr(s)
elif callable(s):
return get_callable_name(s)
elif isinstance(s, Node):
return str(s)
elif isinstance(s, (list, tuple)):
body = ", ".join(_str(x) for x in s)
return "({0})".format(body if len(s) > 1 else (body + ","))
else:
return pformat(s).rstrip()
@dispatch(Node)
def subterms(expr):
return concat([[expr], concat(map(subterms, expr._inputs))])
@dispatch(object)
def subterms(x):
yield x
def subs(o, d):
""" Substitute values within data structure
>>> subs(1, {1: 2})
2
>>> subs([1, 2, 3], {2: 'Hello'})
[1, 'Hello', 3]
"""
d = {k: v for k, v in d.items() if k is not v}
if not d:
return o
try:
if o in d:
d = d.copy()
o = d.pop(o)
except TypeError:
pass
return _subs(o, d)
@dispatch((tuple, list), Mapping)
def _subs(o, d):
return type(o)(subs(arg, d) for arg in o)
@dispatch(Node, Mapping)
def _subs(o, d):
"""
>>> from blaze.expr import symbol
>>> t = symbol('t', 'var * {name: string, balance: int}')
>>> subs(t, {'balance': 'amount'}).fields
['name', 'amount']
"""
newargs = (subs(arg, d) for arg in o._args)
return type(o)(*newargs)
@dispatch(object, Mapping)
def _subs(o, d):
""" Private dispatched version of ``subs``
>>> subs('Hello', {})
'Hello'
"""
return o
def path(a, b):
""" A path of nodes from a to b
>>> from blaze.expr import symbol
>>> t = symbol('t', 'var * {name: string, amount: int, id: int}')
>>> expr = t.amount.sum()
>>> list(path(expr, t))
[sum(t.amount), t.amount, <`t` symbol; dshape='...'>]
"""
while not a.isidentical(b):
yield a
if not a._inputs:
break
for child in a._inputs:
if any(b.isidentical(node) for node in child._traverse()):
a = child
break
yield a
def common_subexpression(expr, *exprs):
""" Common sub expression between subexpressions
Examples
--------
>>> from blaze.expr import symbol
>>> t = symbol('t', 'var * {x: int, y: int}')
>>> common_subexpression(t.x, t.y)
<`t` symbol; dshape='var * {x: int32, y: int32}'>
"""
# only one expression has itself as a common subexpression
if not exprs:
return expr
exprs = (expr,) + exprs
# get leaves for every expression
all_leaves = [expr._leaves() for expr in exprs]
# leaves common to all expressions
leaves = set.intersection(*map(set, all_leaves))
# no common leaves therefore no common subexpression
if not leaves:
raise ValueError(
'No common leaves found in expressions %s' % list(exprs)
)
# list of paths from each expr to each leaf
pathlist = [list(path(expr, leaf)) for expr in exprs for leaf in leaves]
# ordered intersection of paths
common = reduce(ordered_intersect, pathlist)
if not common:
raise ValueError(
'No common subexpression found in paths to leaf: %s' % list(
map(set, pathlist)
)
)
# the first expression is the deepest node in the tree that is an ancestor
# of every expression in `exprs`
return first(common)
def eval_str(expr):
""" String suitable for evaluation
>>> from blaze.expr import symbol, eval_str
>>> x = symbol('x', 'real')
>>> eval_str(2*x + 1)
'(2 * x) + 1'
>>> from datetime import date
>>> eval_str(date(2000, 1, 20))
'datetime.date(2000, 1, 20)'
"""
from datetime import date, datetime
if isinstance(expr, (date, datetime)):
return repr(expr)
return repr(expr) if isinstance(expr, _strtypes) else str(expr)
def parenthesize(s):
"""
>>> parenthesize('1')
'1'
>>> parenthesize('1 + 2')
'(1 + 2)'
"""
if ' ' in s:
return '(%s)' % s
else:
return s
| {
"repo_name": "ContinuumIO/blaze",
"path": "blaze/expr/core.py",
"copies": "3",
"size": "14485",
"license": "bsd-3-clause",
"hash": 2019177139318326000,
"line_mean": 25.240942029,
"line_max": 79,
"alpha_frac": 0.5419399379,
"autogenerated": false,
"ratio": 3.829984135378107,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5871924073278108,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from collections import namedtuple, Iterator
from contextlib import contextmanager
from warnings import warn
from datashape import discover
import networkx as nx
import numpy as np
from toolz import concatv
from .compatibility import map, adjacency
from .utils import expand_tuples, ignoring
ooc_types = set() # Out-of-Core types
class FailedConversionWarning(UserWarning):
def __init__(self, src, dest, exc):
self.src = src
self.dest = dest
self.exc = exc
def __str__(self):
return 'Failed on %s -> %s. Working around\nError message:\n%s' % (
self.src.__name__, self.dest.__name__, self.exc,
)
class IterProxy(object):
"""An proxy to another iterator to support swapping the underlying stream
mid-iteration.
Parameters
----------
it : iterable
The iterable to proxy.
Attributes
----------
it : iterable
The iterable being proxied. This can be reassigned to change the
underlying stream.
"""
def __init__(self, it):
self._it = iter(it)
def __next__(self):
return next(self.it)
next = __next__ # py2 compat
def __iter__(self):
return self
@property
def it(self):
return self._it
@it.setter
def it(self, value):
self._it = iter(value)
class NetworkDispatcher(object):
def __init__(self, name):
self.name = name
self.graph = nx.DiGraph()
def register(self, a, b, cost=1.0):
sigs = expand_tuples([a, b])
def _(func):
for a, b in sigs:
self.graph.add_edge(b, a, cost=cost, func=func)
return func
return _
def path(self, *args, **kwargs):
return path(self.graph, *args, **kwargs)
def __call__(self, *args, **kwargs):
return _transform(self.graph, *args, **kwargs)
def _transform(graph, target, source, excluded_edges=None, ooc_types=ooc_types,
**kwargs):
""" Transform source to target type using graph of transformations """
# take a copy so we can mutate without affecting the input
excluded_edges = (excluded_edges.copy()
if excluded_edges is not None else
set())
with ignoring(NotImplementedError):
if 'dshape' not in kwargs or kwargs['dshape'] is None:
kwargs['dshape'] = discover(source)
pth = path(graph, type(source), target,
excluded_edges=excluded_edges,
ooc_types=ooc_types)
x = source
path_proxy = IterProxy(pth)
for convert_from, convert_to, f, cost in path_proxy:
try:
x = f(x, excluded_edges=excluded_edges, **kwargs)
except NotImplementedError as e:
if kwargs.get('raise_on_errors'):
raise
warn(FailedConversionWarning(convert_from, convert_to, e))
# exclude the broken edge
excluded_edges |= {(convert_from, convert_to)}
# compute the path from `source` to `target` excluding
# the edge that broke
fresh_path = list(path(graph, type(source), target,
excluded_edges=excluded_edges,
ooc_types=ooc_types))
fresh_path_cost = path_cost(fresh_path)
# compute the path from the current `convert_from` type
# to the `target`
try:
greedy_path = list(path(graph, convert_from, target,
excluded_edges=excluded_edges,
ooc_types=ooc_types))
except nx.exception.NetworkXNoPath:
greedy_path_cost = np.inf
else:
greedy_path_cost = path_cost(greedy_path)
if fresh_path_cost < greedy_path_cost:
# it is faster to start over from `source` with a new path
x = source
pth = fresh_path
else:
# it is faster to work around our broken edge from our
# current location
pth = greedy_path
path_proxy.it = pth
return x
PathPart = namedtuple('PathPart', 'convert_from convert_to func cost')
_virtual_superclasses = (Iterator,)
def path(graph, source, target, excluded_edges=None, ooc_types=ooc_types):
""" Path of functions between two types """
if not isinstance(source, type):
source = type(source)
if not isinstance(target, type):
target = type(target)
for cls in concatv(source.mro(), _virtual_superclasses):
if cls in graph:
source = cls
break
# If both source and target are Out-Of-Core types then restrict ourselves
# to the graph of out-of-core types
if ooc_types:
oocs = tuple(ooc_types)
if issubclass(source, oocs) and issubclass(target, oocs):
graph = graph.subgraph([n for n in graph.nodes()
if issubclass(n, oocs)])
with without_edges(graph, excluded_edges) as g:
pth = nx.shortest_path(g, source=source, target=target, weight='cost')
edge = adjacency(graph)
def path_part(src, tgt):
node = edge[src][tgt]
return PathPart(src, tgt, node['func'], node['cost'])
return map(path_part, pth, pth[1:])
def path_cost(path):
"""Calculate the total cost of a path.
"""
return sum(p.cost for p in path)
@contextmanager
def without_edges(g, edges):
edges = edges or []
held = dict()
_g_edge = adjacency(g)
for a, b in edges:
held[(a, b)] = _g_edge[a][b]
g.remove_edge(a, b)
try:
yield g
finally:
for (a, b), kwargs in held.items():
g.add_edge(a, b, **kwargs)
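# Illustrative sketch (not part of the original module): registering a couple
# of conversions on a NetworkDispatcher and converting through the graph.
# The converter functions and types here are hypothetical, not part of odo.
def _example_network_dispatcher():
    nd = NetworkDispatcher('example')

    @nd.register(str, int)          # converts int -> str
    def int_to_str(x, **kwargs):
        return str(x)

    @nd.register(float, str)        # converts str -> float
    def str_to_float(x, **kwargs):
        return float(x)

    # walks the cheapest path int -> str -> float through the graph
    return nd(float, 3)             # 3.0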
| {
"repo_name": "ContinuumIO/odo",
"path": "odo/core.py",
"copies": "2",
"size": "5915",
"license": "bsd-3-clause",
"hash": 1939837459934385000,
"line_mean": 28.575,
"line_max": 79,
"alpha_frac": 0.5666948436,
"autogenerated": false,
"ratio": 4.02107409925221,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5587768942852209,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division, print_function)
from collections import namedtuple
from hashlib import sha256
import os
import shutil
import sys
import tempfile
import fnmatch
from sympy.utilities.pytest import XFAIL
def may_xfail(func):
if sys.platform.lower() == 'darwin' or os.name == 'nt':
# sympy.utilities._compilation needs more testing on Windows and macOS
# once those two platforms are reliably supported this xfail decorator
# may be removed.
return XFAIL(func)
else:
return func
if sys.version_info[0] == 2:
class FileNotFoundError(IOError):
pass
class TemporaryDirectory(object):
def __init__(self):
self.path = tempfile.mkdtemp()
def __enter__(self):
return self.path
def __exit__(self, exc, value, tb):
shutil.rmtree(self.path)
else:
FileNotFoundError = FileNotFoundError
TemporaryDirectory = tempfile.TemporaryDirectory
class CompilerNotFoundError(FileNotFoundError):
pass
def get_abspath(path, cwd='.'):
""" Returns the aboslute path.
Parameters
==========
path : str
(relative) path.
cwd : str
Path to root of relative path.
"""
if os.path.isabs(path):
return path
else:
if not os.path.isabs(cwd):
cwd = os.path.abspath(cwd)
return os.path.abspath(
os.path.join(cwd, path)
)
def make_dirs(path):
""" Create directories (equivalent of ``mkdir -p``). """
if path[-1] == '/':
parent = os.path.dirname(path[:-1])
else:
parent = os.path.dirname(path)
if len(parent) > 0:
if not os.path.exists(parent):
make_dirs(parent)
if not os.path.exists(path):
os.mkdir(path, 0o777)
else:
assert os.path.isdir(path)
def copy(src, dst, only_update=False, copystat=True, cwd=None,
dest_is_dir=False, create_dest_dirs=False):
""" Variation of ``shutil.copy`` with extra options.
Parameters
==========
src : str
Path to source file.
dst : str
Path to destination.
only_update : bool
Only copy if source is newer than destination
(returns None if it was newer), default: ``False``.
copystat : bool
See ``shutil.copystat``. default: ``True``.
cwd : str
Path to working directory (root of relative paths).
dest_is_dir : bool
Ensures that dst is treated as a directory. default: ``False``
create_dest_dirs : bool
Creates directories if needed.
Returns
=======
Path to the copied file.
"""
if cwd: # Handle working directory
if not os.path.isabs(src):
src = os.path.join(cwd, src)
if not os.path.isabs(dst):
dst = os.path.join(cwd, dst)
    if not os.path.exists(src):  # Make sure source file exists
raise FileNotFoundError("Source: `{}` does not exist".format(src))
# We accept both (re)naming destination file _or_
    # passing a (possibly non-existent) destination directory
if dest_is_dir:
if not dst[-1] == '/':
dst = dst+'/'
else:
if os.path.exists(dst) and os.path.isdir(dst):
dest_is_dir = True
if dest_is_dir:
dest_dir = dst
dest_fname = os.path.basename(src)
dst = os.path.join(dest_dir, dest_fname)
else:
dest_dir = os.path.dirname(dst)
dest_fname = os.path.basename(dst)
if not os.path.exists(dest_dir):
if create_dest_dirs:
make_dirs(dest_dir)
else:
raise FileNotFoundError("You must create directory first.")
if only_update:
# This function is not defined:
# XXX: This branch is clearly not tested!
if not missing_or_other_newer(dst, src): # noqa
return
if os.path.islink(dst):
        dst = get_abspath(os.path.realpath(dst), cwd=cwd)  # os.path.abspath accepts no cwd argument
shutil.copy(src, dst)
if copystat:
shutil.copystat(src, dst)
return dst
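# Illustrative sketch (not part of the original module): copying a file into
# a destination directory that does not exist yet.  ``_example_copy`` and its
# ``tmpdir`` argument are hypothetical, added only for demonstration.
def _example_copy(tmpdir):
    src = os.path.join(tmpdir, 'a.txt')
    with open(src, 'wt') as fh:
        fh.write('hello')
    dest_dir = os.path.join(tmpdir, 'sub', 'dir')
    # dest_is_dir + create_dest_dirs make the nested directories on demand
    return copy(src, dest_dir, dest_is_dir=True, create_dest_dirs=True)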
Glob = namedtuple('Glob', 'pathname')
ArbitraryDepthGlob = namedtuple('ArbitraryDepthGlob', 'filename')
def glob_at_depth(filename_glob, cwd=None):
    if cwd is None:
cwd = '.'
globbed = []
for root, dirs, filenames in os.walk(cwd):
for fn in filenames:
# This is not tested:
if fnmatch.fnmatch(fn, filename_glob):
globbed.append(os.path.join(root, fn))
return globbed
def sha256_of_file(path, nblocks=128):
""" Computes the SHA256 hash of a file.
Parameters
==========
path : string
Path to file to compute hash of.
nblocks : int
Number of blocks to read per iteration.
Returns
=======
hashlib sha256 hash object. Use ``.digest()`` or ``.hexdigest()``
on returned object to get binary or hex encoded string.
"""
sh = sha256()
with open(path, 'rb') as f:
for chunk in iter(lambda: f.read(nblocks*sh.block_size), b''):
sh.update(chunk)
return sh
def sha256_of_string(string):
""" Computes the SHA256 hash of a string. """
sh = sha256()
sh.update(string)
return sh
def pyx_is_cplus(path):
"""
    Inspect a Cython source file (.pyx) and look for a comment line like:
    # distutils: language = c++
    Returns True if such a line is present in the file, else False.
"""
for line in open(path, 'rt'):
if line.startswith('#') and '=' in line:
splitted = line.split('=')
if len(splitted) != 2:
continue
lhs, rhs = splitted
if lhs.strip().split()[-1].lower() == 'language' and \
rhs.strip().split()[0].lower() == 'c++':
return True
return False
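# Illustrative sketch (not part of the original module): pyx_is_cplus reacts
# to the distutils language comment.  ``_example_pyx_is_cplus`` and its
# ``tmpdir`` argument are hypothetical, added only for demonstration.
def _example_pyx_is_cplus(tmpdir):
    pth = os.path.join(tmpdir, '_probe.pyx')
    with open(pth, 'wt') as fh:
        fh.write('# distutils: language = c++\n')
    return pyx_is_cplus(pth)    # True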
def import_module_from_file(filename, only_if_newer_than=None):
""" Imports python extension (from shared object file)
Provide a list of paths in `only_if_newer_than` to check
timestamps of dependencies. import_ raises an ImportError
if any is newer.
Word of warning: The OS may cache shared objects which makes
reimporting same path of an shared object file very problematic.
It will not detect the new time stamp, nor new checksum, but will
instead silently use old module. Use unique names for this reason.
Parameters
==========
filename : str
Path to shared object.
only_if_newer_than : iterable of strings
Paths to dependencies of the shared object.
Raises
======
``ImportError`` if any of the files specified in ``only_if_newer_than`` are newer
than the file given by filename.
"""
path, name = os.path.split(filename)
name, ext = os.path.splitext(name)
name = name.split('.')[0]
if sys.version_info[0] == 2:
from imp import find_module, load_module
fobj, filename, data = find_module(name, [path])
if only_if_newer_than:
for dep in only_if_newer_than:
if os.path.getmtime(filename) < os.path.getmtime(dep):
raise ImportError("{} is newer than {}".format(dep, filename))
mod = load_module(name, fobj, filename, data)
else:
import importlib.util
spec = importlib.util.spec_from_file_location(name, filename)
if spec is None:
raise ImportError("Failed to import: '%s'" % filename)
mod = importlib.util.module_from_spec(spec)
spec.loader.exec_module(mod)
return mod
def find_binary_of_command(candidates):
""" Finds binary first matching name among candidates.
    Calls `find_executable` from distutils for the provided candidates and returns
first hit.
Parameters
==========
candidates : iterable of str
Names of candidate commands
Raises
======
CompilerNotFoundError if no candidates match.
"""
from distutils.spawn import find_executable
for c in candidates:
binary_path = find_executable(c)
if c and binary_path:
return c, binary_path
raise CompilerNotFoundError('No binary located for candidates: {}'.format(candidates))
def unique_list(l):
""" Uniquify a list (skip duplicate items). """
result = []
for x in l:
if x not in result:
result.append(x)
return result
| {
"repo_name": "kaushik94/sympy",
"path": "sympy/utilities/_compilation/util.py",
"copies": "1",
"size": "8374",
"license": "bsd-3-clause",
"hash": -3200713744012394500,
"line_mean": 26.8205980066,
"line_max": 90,
"alpha_frac": 0.602340578,
"autogenerated": false,
"ratio": 3.9668403600189484,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5069180938018948,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from collections import namedtuple
from itertools import starmap
from timeit import default_timer
from time import sleep
from multiprocessing import Process, Pipe, current_process
from ..callbacks import Callback
from ..utils import import_required
# Stores execution data for each task
TaskData = namedtuple('TaskData', ('key', 'task', 'start_time',
'end_time', 'worker_id'))
class Profiler(Callback):
"""A profiler for dask execution at the task level.
Records the following information for each task:
1. Key
2. Task
3. Start time in seconds since the epoch
4. Finish time in seconds since the epoch
5. Worker id
Examples
--------
>>> from operator import add, mul
>>> from dask.threaded import get
>>> dsk = {'x': 1, 'y': (add, 'x', 10), 'z': (mul, 'y', 2)}
>>> with Profiler() as prof:
... get(dsk, 'z')
22
>>> prof.results # doctest: +SKIP
[('y', (add, 'x', 10), 1435352238.48039, 1435352238.480655, 140285575100160),
('z', (mul, 'y', 2), 1435352238.480657, 1435352238.480803, 140285566707456)]
These results can be visualized in a bokeh plot using the ``visualize``
method. Note that this requires bokeh to be installed.
>>> prof.visualize() # doctest: +SKIP
You can activate the profiler globally
>>> prof.register() # doctest: +SKIP
If you use the profiler globally you will need to clear out old results
manually.
>>> prof.clear()
"""
def __init__(self):
self._results = {}
self.results = []
self._dsk = {}
def __enter__(self):
self.clear()
return super(Profiler, self).__enter__()
def _start(self, dsk):
self._dsk.update(dsk)
def _pretask(self, key, dsk, state):
start = default_timer()
self._results[key] = (key, dsk[key], start)
def _posttask(self, key, value, dsk, state, id):
end = default_timer()
self._results[key] += (end, id)
def _finish(self, dsk, state, failed):
results = dict((k, v) for k, v in self._results.items() if len(v) == 5)
self.results += list(starmap(TaskData, results.values()))
self._results.clear()
def _plot(self, **kwargs):
from .profile_visualize import plot_tasks
return plot_tasks(self.results, self._dsk, **kwargs)
def visualize(self, **kwargs):
"""Visualize the profiling run in a bokeh plot.
See also
--------
dask.diagnostics.profile_visualize.visualize
"""
from .profile_visualize import visualize
return visualize(self, **kwargs)
def clear(self):
"""Clear out old results from profiler"""
self._results.clear()
del self.results[:]
self._dsk = {}
ResourceData = namedtuple('ResourceData', ('time', 'mem', 'cpu'))
class ResourceProfiler(Callback):
"""A profiler for resource use.
Records the following each timestep
1. Time in seconds since the epoch
2. Memory usage in MB
3. % CPU usage
Examples
--------
>>> from operator import add, mul
>>> from dask.threaded import get
>>> dsk = {'x': 1, 'y': (add, 'x', 10), 'z': (mul, 'y', 2)}
>>> with ResourceProfiler() as prof: # doctest: +SKIP
... get(dsk, 'z')
22
These results can be visualized in a bokeh plot using the ``visualize``
method. Note that this requires bokeh to be installed.
>>> prof.visualize() # doctest: +SKIP
You can activate the profiler globally
>>> prof.register() # doctest: +SKIP
If you use the profiler globally you will need to clear out old results
manually.
>>> prof.clear() # doctest: +SKIP
"""
def __init__(self, dt=1):
self._tracker = _Tracker(dt)
self._tracker.start()
self.results = []
self._entered = False
def _start_collect(self):
assert self._tracker.is_alive(), "Resource tracker is shutdown"
self._tracker.parent_conn.send('collect')
def _stop_collect(self):
if self._tracker.is_alive():
self._tracker.parent_conn.send('send_data')
self.results.extend(starmap(ResourceData, self._tracker.parent_conn.recv()))
def __enter__(self):
self.clear()
self._entered = True
self._start_collect()
return super(ResourceProfiler, self).__enter__()
def __exit__(self, *args):
self._entered = False
self._stop_collect()
super(ResourceProfiler, self).__exit__(*args)
def _start(self, dsk):
self._start_collect()
def _finish(self, dsk, state, failed):
if not self._entered:
self._stop_collect()
def close(self):
"""Shutdown the resource tracker process"""
self._tracker.shutdown()
__del__ = close
def clear(self):
self.results = []
def _plot(self, **kwargs):
from .profile_visualize import plot_resources
return plot_resources(self.results, **kwargs)
def visualize(self, **kwargs):
"""Visualize the profiling run in a bokeh plot.
See also
--------
dask.diagnostics.profile_visualize.visualize
"""
from .profile_visualize import visualize
return visualize(self, **kwargs)
class _Tracker(Process):
"""Background process for tracking resource usage"""
def __init__(self, dt=1):
psutil = import_required("psutil", "Tracking resource usage requires "
"`psutil` to be installed")
Process.__init__(self)
self.daemon = True
self.dt = dt
self.parent = psutil.Process(current_process().pid)
self.parent_conn, self.child_conn = Pipe()
def shutdown(self):
if not self.parent_conn.closed:
self.parent_conn.send('shutdown')
self.parent_conn.close()
self.join()
def _update_pids(self, pid):
return [self.parent] + [p for p in self.parent.children()
if p.pid != pid and p.status() != 'zombie']
def run(self):
pid = current_process()
ps = self._update_pids(pid)
data = []
while True:
try:
msg = self.child_conn.recv()
except KeyboardInterrupt:
continue
if msg == 'shutdown':
break
elif msg == 'collect':
ps = self._update_pids(pid)
while not self.child_conn.poll():
tic = default_timer()
mem = cpu = 0
for p in ps:
try:
mem2 = p.memory_info().rss
cpu2 = p.cpu_percent()
except Exception: # could be a few different exceptions
pass
else:
# Only increment if both were successful
mem += mem2
cpu += cpu2
data.append((tic, mem / 1e6, cpu))
sleep(self.dt)
elif msg == 'send_data':
self.child_conn.send(data)
data = []
self.child_conn.close()
CacheData = namedtuple('CacheData', ('key', 'task', 'metric', 'cache_time',
'free_time'))
class CacheProfiler(Callback):
"""A profiler for dask execution at the scheduler cache level.
Records the following information for each task:
1. Key
2. Task
3. Size metric
4. Cache entry time in seconds since the epoch
5. Cache exit time in seconds since the epoch
Examples
--------
>>> from operator import add, mul
>>> from dask.threaded import get
>>> dsk = {'x': 1, 'y': (add, 'x', 10), 'z': (mul, 'y', 2)}
>>> with CacheProfiler() as prof:
... get(dsk, 'z')
22
>>> prof.results # doctest: +SKIP
[CacheData('y', (add, 'x', 10), 1, 1435352238.48039, 1435352238.480655),
CacheData('z', (mul, 'y', 2), 1, 1435352238.480657, 1435352238.480803)]
The default is to count each task (``metric`` is 1 for all tasks). Other
    functions may be used as a metric instead through the ``metric`` keyword. For
example, the ``nbytes`` function found in ``cachey`` can be used to measure
the number of bytes in the cache.
>>> from cachey import nbytes # doctest: +SKIP
>>> with CacheProfiler(metric=nbytes) as prof: # doctest: +SKIP
... get(dsk, 'z')
The profiling results can be visualized in a bokeh plot using the
``visualize`` method. Note that this requires bokeh to be installed.
>>> prof.visualize() # doctest: +SKIP
You can activate the profiler globally
>>> prof.register() # doctest: +SKIP
If you use the profiler globally you will need to clear out old results
manually.
>>> prof.clear()
"""
def __init__(self, metric=None, metric_name=None):
self._metric = metric if metric else lambda value: 1
if metric_name:
self._metric_name = metric_name
elif metric:
self._metric_name = metric.__name__
else:
self._metric_name = 'count'
def __enter__(self):
self.clear()
return super(CacheProfiler, self).__enter__()
def _start(self, dsk):
self._dsk.update(dsk)
if not self._start_time:
self._start_time = default_timer()
def _posttask(self, key, value, dsk, state, id):
t = default_timer()
self._cache[key] = (self._metric(value), t)
for k in state['released'].intersection(self._cache):
metric, start = self._cache.pop(k)
self.results.append(CacheData(k, dsk[k], metric, start, t))
def _finish(self, dsk, state, failed):
t = default_timer()
for k, (metric, start) in self._cache.items():
self.results.append(CacheData(k, dsk[k], metric, start, t))
self._cache.clear()
def _plot(self, **kwargs):
from .profile_visualize import plot_cache
return plot_cache(self.results, self._dsk, self._start_time,
self._metric_name, **kwargs)
def visualize(self, **kwargs):
"""Visualize the profiling run in a bokeh plot.
See also
--------
dask.diagnostics.profile_visualize.visualize
"""
from .profile_visualize import visualize
return visualize(self, **kwargs)
def clear(self):
"""Clear out old results from profiler"""
self.results = []
self._cache = {}
self._dsk = {}
self._start_time = None
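# Hedged usage sketch (added for illustration, not part of the original module):
# the three callbacks above can be nested around a single computation. The graph
# and scheduler mirror the docstring examples; ResourceProfiler needs psutil.
def _example_profile_all():
    from operator import add, mul
    from dask.threaded import get
    dsk = {'x': 1, 'y': (add, 'x', 10), 'z': (mul, 'y', 2)}
    with Profiler() as prof, ResourceProfiler(dt=0.25) as rprof, \
            CacheProfiler() as cprof:
        result = get(dsk, 'z')
    # Each profiler now holds its own view of the run.
    return result, prof.results, rprof.results, cprof.results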
| {
"repo_name": "mraspaud/dask",
"path": "dask/diagnostics/profile.py",
"copies": "1",
"size": "10884",
"license": "bsd-3-clause",
"hash": 1514948959712688600,
"line_mean": 29.7457627119,
"line_max": 88,
"alpha_frac": 0.5610988607,
"autogenerated": false,
"ratio": 3.9985304922850844,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0001359497145723695,
"num_lines": 354
} |
from __future__ import absolute_import, division, print_function
from collections import namedtuple
import json
import multiprocessing.pool
import os
import re
import sys
import requests
from bs4 import BeautifulSoup
import nfldb
import nflfan.config
__pdoc__ = {}
_user_agent = 'Mozilla/5.0 (X11; Linux x86_64)'
"""
The user agent string is heuristically determined. Namely, I was having
problems getting some providers to authenticate with more vague user
agent strings.
You may want to use a different user agent string entirely if you're
writing your own provider.
"""
_urls = {
'yahoo': {
'owner': 'http://football.fantasysports.yahoo.com/f1/%s/teams',
'matchup': 'http://football.fantasysports.yahoo.com/f1/%s/'
'matchup?matchup_week=%d&ajaxrequest=1',
'roster': 'http://football.fantasysports.yahoo.com/f1/%s/%s?week=%d',
},
'espn': {
'owner': 'http://games.espn.go.com/ffl/leaguesetup'
'/ownerinfo?leagueId={league_id}&seasonId={season_id}',
'matchup': 'http://games.espn.go.com/ffl/scoreboard?'
'leagueId={league_id}&matchupPeriodId={week}'
'&seasonId={season_id}',
'roster': 'http://games.espn.go.com/ffl/playertable/prebuilt/'
'manageroster?leagueId={league_id}&teamId={team_id}'
'&seasonId={season_id}&scoringPeriodId={week}'
'&view=overview&context=clubhouse'
'&ajaxPath=playertable/prebuilt/manageroster'
'&managingIr=false&droppingPlayers=false&asLM=false',
},
}
def pp(soup):
print(soup.prettify().encode('utf-8'))
def eprint(*args, **kwargs):
kwargs['file'] = sys.stderr
args = ['[nflfan]'] + list(args)
print(*args, **kwargs)
def player_search(db, full_name, team=None, position=None):
"""
A thin wrapper around `nfldb.player_search` that tries searching
with `team` or `position` when given, but if no results are found,
then this returns the results of a search with just the full name.
This allows for a slightly out-of-date database to still provide
a match while also disambiguating players with the same name.
"""
if position not in nfldb.Enums.player_pos:
position = None
p, _ = nfldb.player_search(db, full_name, team=team, position=position)
if p is None and position is not None:
p, _ = nfldb.player_search(db, full_name, team=team, position=None)
if p is None and team is not None:
p, _ = nfldb.player_search(db, full_name, team=None, position=position)
if p is None and team is not None and position is not None:
p, _ = nfldb.player_search(db, full_name, team=None, position=None)
return p
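# Hedged usage sketch: the player name, team and position below are arbitrary
# examples, and nfldb.connect() is assumed here to be the usual way to open an
# nfldb database connection.
def _example_player_search():
    db = nfldb.connect()
    return player_search(db, 'Tom Brady', team='NE', position='QB')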
class League (namedtuple('League',
'season phase ident prov_name name scoring conf')):
__pdoc__['League.season'] = \
"""The year of the NFL season for this league."""
__pdoc__['League.phase'] = \
"""The phase of the season: preseason, regular or post."""
__pdoc__['League.ident'] = \
"""
A unique identifier for this league. The type and format of
this value is provider dependent.
"""
__pdoc__['League.prov_name'] = \
"""The name of the provider for this league."""
__pdoc__['League.name'] = \
"""The name of this league from the configuration."""
__pdoc__['League.scoring'] = \
"""The `nflfan.ScoreSchema` for this league."""
__pdoc__['League.conf'] = \
"""
A dictionary of configuration settings. The keys and values in
this dictionary are provider dependent.
"""
def __init__(self, *args):
super(League, self).__init__(*args)
self._cache = {}
@property
def full_name(self):
return '%s.%s' % (self.prov_name, self.name)
def is_me(self, obj):
if not self.conf.get('me', None):
return False
if isinstance(obj, Roster):
return self.is_me(obj.owner)
elif isinstance(obj, Matchup):
return self.is_me(obj.owner1) or self.is_me(obj.owner2)
else:
return self.conf['me'].lower() in obj.name.lower()
def me(self, objs):
for obj in objs:
if self.is_me(obj):
return obj
return None
def owners(self, week):
return self._cached(week, 'owners')
def owner(self, week, ident):
for o in self.owners(week):
if o.ident == ident:
return o
return None
def matchups(self, week):
return self._cached(week, 'matchups')
def matchup(self, week, ident):
for m in self.matchups(week):
if m.owner1.ident == ident or m.owner2.ident == ident:
return m
return None
def rosters(self, week):
return self._cached(week, 'rosters')
def roster(self, week, ident):
for r in self.rosters(week):
if r.owner.ident == ident:
return r
return None
def cache_path(self, week):
return os.path.join(nflfan.config.cache_path(),
str(self.season), str(self.phase), str(week),
self.full_name + '.json')
def _cached(self, week, key):
if week not in self._cache:
self._load(week)
return self._cache[week][key]
def _load(self, week):
raw = None
fp = self.cache_path(week)
try:
with open(fp) as f:
raw = json.load(f)
except IOError:
raise IOError(
"No cached data for week %d in %s could be found at %s\n"
"Have you run `nflfan-update --week %d` yet?"
% (week, self.full_name, fp, week))
d = {'owners': [], 'matchups': [], 'rosters': []}
for owner in raw['owners']:
d['owners'].append(Owner._make(owner))
for matchup in raw['matchups']:
o1 = None if matchup[0] is None else Owner._make(matchup[0])
o2 = None if matchup[1] is None else Owner._make(matchup[1])
d['matchups'].append(Matchup(o1, o2))
for roster in raw['rosters']:
o = Owner._make(roster[0])
r = Roster(o, roster[1], roster[2], [])
for rp in roster[3]:
r.players.append(RosterPlayer._make(rp))
d['rosters'].append(r)
self._cache[week] = d
def __str__(self):
return self.full_name
class Matchup (namedtuple('Matchup', 'owner1 owner2')):
__pdoc__['Matchup.owner1'] = \
"""
One of the two teams in this matchup represented as an
`nflfan.Owner` object.
"""
__pdoc__['Matchup.owner2'] = \
"""
One of the two teams in this matchup represented as an
`nflfan.Owner` object.
"""
def other(self, ident):
"""
        Given an identifier for one of the owners in this matchup,
return the `nflfan.Owner` of the other owner.
"""
assert ident in (self.owner1.ident, self.owner2.ident)
if ident == self.owner1.ident:
return self.owner2
else:
return self.owner1
def __str__(self):
return '%s vs. %s' % (self.owner1, self.owner2)
class Owner (namedtuple('Owner', 'ident name')):
__pdoc__['Owner.ident'] = \
"""
A unique identifier corresponding to this owner. The type
of this value is provider-dependent.
"""
__pdoc__['Owner.name'] = \
"""A string representing the name of this owner."""
def __str__(self):
return self.name
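# Hedged usage sketch: Owner and Matchup are plain namedtuples, so throwaway
# instances with made-up identifiers are enough to show Matchup.other().
def _example_matchup_other():
    alpha, beta = Owner('1', 'Team Alpha'), Owner('2', 'Team Beta')
    matchup = Matchup(alpha, beta)
    return matchup.other('1')  # returns the Owner for 'Team Beta'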
class Roster (namedtuple('Roster', 'owner season week players')):
__pdoc__['Roster.owner'] = \
"""
A `nflfan.Owner` object corresponding to the owner of this
roster.
"""
__pdoc__['Roster.players'] = \
"""
A list of `nflfan.RosterPlayer` objects corresponding to the
set of players on this roster.
"""
def new_player(self, pos, team, bench, player_id):
"""
A convenience method for creating a new `nflfan.RosterPlayer`
given the current roster.
"""
return RosterPlayer(pos, team, bench, self.season, self.week,
None, 0.0, None, player_id)
@property
def active(self):
return filter(lambda rp: not rp.bench, self.players)
@property
def benched(self):
return filter(lambda rp: rp.bench, self.players)
@property
def points(self):
"""Returns the total number of points for non-benched players."""
return sum(p.points for p in self.players if not p.bench)
def __str__(self):
s = []
for rp in self.players:
s.append(str(rp))
return '\n'.join(s)
class RosterPlayer (
namedtuple('RosterPlayer',
'position team bench season week '
'game points player player_id')):
__pdoc__['RosterPlayer.position'] = \
"""
A string corresponding to the position of the roster spot
occupied by this player. The possible values of this string are
provider dependent.
"""
__pdoc__['RosterPlayer.team'] = \
"""
A team abbreviation that this player belongs to. It must be a
valid nfldb team abbreviation and *cannot* be `UNK`.
"""
__pdoc__['RosterPlayer.bench'] = \
"""A boolean indicating whether this is a bench position or not."""
__pdoc__['RosterPlayer.season'] = \
"""The year of the corresponding NFL season."""
__pdoc__['RosterPlayer.week'] = \
"""The week number in which this roster was set."""
__pdoc__['RosterPlayer.game'] = \
"""
The `nfldb.Game` object for the game that this player played
in. If this roster position corresponds to a bye week, then
this attribute is set to `None`.
"""
__pdoc__['RosterPlayer.points'] = \
"""The total fantasy points for this roster player."""
__pdoc__['RosterPlayer.player'] = \
"""
A `nfldb.Player` object corresponding to this roster player.
This attribute is `None` by default, and is always `None` for
roster players corresponding to entire teams (e.g., defense).
"""
__pdoc__['RosterPlayer.player_id'] = \
"""
A player id string corresponding to the player in this roster
position and a player in nfldb. This may be `None` when the
    roster player corresponds to an entire team (e.g., a defense).
"""
@property
def is_empty(self):
return self.team is None and self.player_id is None
@property
def is_defense(self):
return self.team is not None and self.player_id is None
@property
def is_player(self):
return self.player_id is not None
@property
def id(self):
if self.is_empty:
return 'Empty'
elif self.is_defense:
return self.team
else:
return self.player_id
@property
def name(self):
return self.id if not self.player else self.player.full_name
def __str__(self):
if self.game is not None and self.game.is_playing:
playing = '*'
else:
playing = ' '
return '%-6s %-4s %-20s %s%0.2f' \
% (self.position, self.team, self.name, playing, self.points)
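# Hedged usage sketch: Roster.new_player fills in the season/week and leaves the
# game, points and nfldb.Player fields at their defaults. The owner, ids and
# teams below are made up for illustration.
def _example_roster():
    roster = Roster(Owner('3', 'Team Gamma'), 2014, 5, [])
    roster.players.append(roster.new_player('QB', 'NE', False, '00-0000001'))
    roster.players.append(roster.new_player('DEF', 'SEA', False, None))
    defenses = [rp for rp in roster.active if rp.is_defense]
    return roster.points, defenses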
class Provider (object):
"""
This class describes the interface that each fantasy football
provider must implement so that it can work with nflfan. In other
words, this is an abstract base class that should **not** be
instantiated directly.
All public members of this class must also be defined in each
provider implementation, including the class variables.
"""
provider_name = None
"""The name of the provider used in the configuration file."""
conf_required = ['scoring', 'league_name', 'season', 'phase', 'league_id']
"""A list of fields required for every provider."""
conf_optional = ['me']
"""A list of fields that are optional for every provider."""
def __init__(self, lg):
self._lg = lg
self._session = requests.Session()
self._session.headers.update(getattr(self, '_headers', {}))
def owners(self):
"""Returns a list of `nflfan.Owner` objects."""
assert False, 'subclass responsibility'
def matchups(self, week):
"""
Given a week number, this returns a list of `nflfan.Matchup`
objects describing the head-to-head matchups for `week`.
"""
assert False, 'subclass responsibility'
def roster(self, player_search, owner, week):
"""
Given a `nflfan.Owner` and a week number, this returns a
`nflfan.Roster` object. The `nflfan.Roster` contains a list of
`nfldb.Player` objects and their corresponding position on the
roster.
`player_search` should be a function that takes a full
player name and returns the closest matching player as a
`nfldb.Player` object. It should also optionally take keyword
arguments `team` and `position` that allow for extra filtering.
Note that the roster position is a string but the set of
possible values is provider dependent. It is used for display
purposes only.
"""
assert False, 'subclass responsibility'
def save(self, fp, player_search, week):
"""
Writes a JSON encoding of all the owners, matchups and rosters
for the given week to a file at `fp`.
`player_search` should be a function that takes a full
player name and returns the closest matching player as a
`nfldb.Player` object. It should also optionally take keyword
arguments `team` and `position` that allow for extra filtering.
"""
d = {
'owners': self.owners(),
'matchups': self.matchups(week),
}
# I'm hoping this doesn't hurt custom providers that don't need
# to do IO to fetch a roster.
def roster(owner):
return self.roster(player_search, owner, week)
# pool = multiprocessing.pool.ThreadPool(3)
# d['rosters'] = pool.map(roster, d['owners'])
d['rosters'] = map(roster, d['owners'])
json.dump(d, open(fp, 'w+'))
def _request(self, url):
eprint('download %s' % url)
r = self._session.get(url)
soup = BeautifulSoup(r.text)
if self._login_form(soup):
self._login()
r = self._session.get(url)
soup = BeautifulSoup(r.text)
if self._login_form(soup):
raise IOError("Authentication failure.")
return r
def _login(self):
assert self._login_url is not None
soup = BeautifulSoup(self._session.get(self._login_url).text)
if not self._login_form(soup):
# Already logged in!
return
form = self._login_form(soup)
params = self._login_params()
for inp in form.find_all('input', type='hidden'):
params[inp['name']] = inp['value']
r = self._session.post(form['action'], params=params)
return BeautifulSoup(r.text)
def _login_params(self):
assert False, 'subclass responsibility'
def _login_form(self, soup):
assert False, 'subclass responsibility'
def __str__(self):
return self.__class__.provider_name
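# Hedged sketch of a minimal custom provider following the interface described
# in the Provider docstring. The class, its data and its behaviour are invented
# for illustration; a real provider would scrape or query its fantasy site.
class _ExampleStaticProvider(Provider):
    provider_name = 'example'
    conf_required = []
    conf_optional = []
    def owners(self):
        return [Owner('1', 'Team Alpha'), Owner('2', 'Team Beta')]
    def matchups(self, week):
        one, two = self.owners()
        return [Matchup(one, two)]
    def roster(self, player_search, owner, week):
        # No players: a real provider would build RosterPlayer entries here.
        return Roster(owner, self._lg.season, week, [])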
class Yahoo (Provider):
provider_name = 'yahoo'
conf_required = []
conf_optional = ['username', 'password']
_headers = {'User-Agent': _user_agent}
_login_url = 'https://login.yahoo.com/config/login'
def __init__(self, lg):
super(Yahoo, self).__init__(lg)
_, _, self._league_num = self._lg.ident.split('.')
def owners(self):
match_owner_link = re.compile('team-[0-9]+-name')
url = _urls['yahoo']['owner'] % self._league_num
soup = BeautifulSoup(self._request(url).text)
owners = []
for link in soup.find_all(id=match_owner_link):
ident = self._owner_id_from_url(link['href'])
owners.append(Owner(ident, link.text.strip()))
return owners
def matchups(self, week):
mk_owner = lambda div: Owner(owner_id(div.a['href']), div.text.strip())
owner_id = self._owner_id_from_url
url = _urls['yahoo']['matchup'] % (self._league_num, week)
rjson = self._request(url).json()
soup = BeautifulSoup(rjson['content'])
matchups = []
for matchup in soup.find('ul').children:
pair = list(matchup.find_all('div', class_='Fz-sm'))
if len(pair) == 1:
matchups.append(Matchup(mk_owner(pair[0]), None))
else:
matchups.append(Matchup(mk_owner(pair[0]), mk_owner(pair[1])))
return matchups
def roster(self, player_search, owner, week):
def to_pos(row):
return row.td.find(class_='pos-label')['data-pos'].strip().upper()
def to_name(row):
return row.find(class_='ysf-player-name').a.text.strip()
def to_team(row):
team_pos = row.find(class_='ysf-player-name').span.text.strip()
return nfldb.standard_team(re.search('^\S+', team_pos).group(0))
def rplayer(r, name, team, pos):
bench = pos == 'BN'
if name is None and team is None:
return r.new_player(pos, None, bench, None)
elif nfldb.standard_team(name) != 'UNK':
return r.new_player(pos, team, bench, None)
else:
player = player_search(name, team=team, position=pos)
return r.new_player(pos, team, bench, player.player_id)
match_table_id = re.compile('^statTable[0-9]+$')
url = _urls['yahoo']['roster'] % (self._league_num, owner.ident, week)
soup = BeautifulSoup(self._request(url).text)
roster = Roster(owner, self._lg.season, week, [])
for table in soup.find_all(id=match_table_id):
for row in table.tbody.find_all('tr', recursive=False):
pos = to_pos(row)
try:
team, name = to_team(row), to_name(row)
roster.players.append(rplayer(roster, name, team, pos))
except AttributeError:
roster.players.append(rplayer(roster, None, None, pos))
return roster
def _owner_id_from_url(self, url):
return re.search('%s/([0-9]+)' % self._league_num, url).group(1)
def _login(self):
soup = super(Yahoo, self)._login()
if self._login_form(soup):
err_div = soup.find('div', class_='yregertxt')
err_msg = 'Unknown error.'
if err_div:
err_msg = err_div.text.strip()
raise IOError('Login failed: %s' % err_msg)
def _login_params(self):
return {
'login': self._lg.conf.get('username', ''),
'passwd': self._lg.conf.get('password', ''),
'.save': 'Sign In',
}
def _login_form(self, soup):
return soup.find(id='login_form')
class ESPN (Provider):
provider_name = 'espn'
conf_required = []
conf_optional = ['username', 'password']
_headers = {'User-Agent': _user_agent}
_login_url = 'http://games.espn.go.com/ffl/signin?_=_'
def owners(self):
url = _urls['espn']['owner'].format(
league_id=self._lg.ident, season_id=self._lg.season)
soup = BeautifulSoup(self._request(url).text)
owners = []
for td in soup.select('tr.ownerRow td.teamName'):
ident = self._owner_id_from_url(td.a['href'])
owners.append(Owner(ident, td.text.strip()))
return owners
def matchups(self, week):
owner_id = self._owner_id_from_url
url = _urls['espn']['matchup'].format(
league_id=self._lg.ident, season_id=self._lg.season, week=week)
soup = BeautifulSoup(self._request(url).text)
matchupDiv = soup.find(id='scoreboardMatchups')
matchups = []
for table in matchupDiv.select('table.matchup'):
t1, t2 = list(table.find_all(class_='name'))
id1, id2 = owner_id(t1.a['href']), owner_id(t2.a['href'])
name1, name2 = t1.a.text.strip(), t2.a.text.strip()
o1, o2 = Owner(id1, name1), Owner(id2, name2)
matchups.append(Matchup(o1, o2))
return matchups
def roster(self, player_search, owner, week):
def to_pos(row):
pos = row.find(class_='playerSlot').text.strip().upper()
if pos == 'BENCH':
return 'BN'
return pos
def to_name(row):
name = row.find(class_='playertablePlayerName').a.text.strip()
# If this is the defense, apparently 'D/ST' is included in
# the name. Wtf?
return re.sub('\s+D/ST$', '', name)
def to_team(row):
tpos = row.find(class_='playertablePlayerName').a.next_sibling
tpos = tpos.strip(' \r\n\t*,|').upper()
# This is a little weird because the team name seems to run
# in with the position. Perhaps a weird encoding quirk?
if len(tpos) < 2:
return 'UNK'
elif len(tpos) == 2:
return nfldb.standard_team(tpos)
else:
team = nfldb.standard_team(tpos[0:3])
if team == 'UNK':
team = nfldb.standard_team(tpos[0:2])
return team
def rplayer(r, name, team, pos):
bench = pos == 'BN'
name_team = nfldb.standard_team(name)
if name is None and team is None:
return r.new_player(pos, None, bench, None)
elif name_team != 'UNK':
return r.new_player(pos, name_team, bench, None)
else:
player = player_search(name, team=team, position=pos)
return r.new_player(pos, team, bench, player.player_id)
url = _urls['espn']['roster'].format(
league_id=self._lg.ident, season_id=self._lg.season, week=week,
team_id=owner.ident)
soup = BeautifulSoup(self._request(url).text)
roster = Roster(owner, self._lg.season, week, [])
for tr in soup.select('tr.pncPlayerRow'):
if tr.get('id', '') == 'pncEmptyRow':
continue
pos = to_pos(tr)
try:
team, name = to_team(tr), to_name(tr)
roster.players.append(rplayer(roster, name, team, pos))
except AttributeError:
roster.players.append(rplayer(roster, None, None, pos))
return roster
def _owner_id_from_url(self, url):
return re.search('teamId=([0-9]+)', url).group(1)
def _login(self):
soup = super(ESPN, self)._login()
if self._login_form(soup):
err_msg = []
for msg in soup.find_all('font', color='#ff0000'):
err_msg.append(msg.text.strip())
err_msg = '\n'.join(err_msg) if err_msg else 'Unknown error.'
raise IOError('Login failed: %s' % err_msg)
def _login_params(self):
return {
'username': self._lg.conf.get('username', ''),
'password': self._lg.conf.get('password', ''),
'submit': 'Sign In',
}
def _login_form(self, soup):
return soup.find('form', attrs={'name': 'loginForm'})
| {
"repo_name": "codeaudit/nflfan",
"path": "nflfan/provider.py",
"copies": "1",
"size": "23778",
"license": "unlicense",
"hash": -4465141964907188000,
"line_mean": 32.8717948718,
"line_max": 79,
"alpha_frac": 0.5658591976,
"autogenerated": false,
"ratio": 3.7333961375412152,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9798663843590604,
"avg_score": 0.00011829831012220949,
"num_lines": 702
} |
from __future__ import absolute_import, division, print_function
from collections import namedtuple
import json
import os
import re
import sys
import time
import requests
from bs4 import BeautifulSoup
import nfldb
import nflfan.config
__pdoc__ = {}
_user_agent = 'Mozilla/5.0 (X11; Linux x86_64)'
# _user_agent = 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/47.0.2498.0 Safari/537.36'
# _user_agent = ''
"""
The user agent string is heuristically determined. Namely, I was having
problems getting some providers to authenticate with more vague user
agent strings.
You may want to use a different user agent string entirely if you're
writing your own provider.
"""
_urls = {
'yahoo': {
'owner': 'http://football.fantasysports.yahoo.com/f1/%s/teams',
'matchup': 'http://football.fantasysports.yahoo.com/f1/%s/'
'matchup?matchup_week=%d&ajaxrequest=1',
'roster': 'http://football.fantasysports.yahoo.com/f1/%s/%s?week=%d',
},
'espn': {
'owner': 'http://games.espn.go.com/ffl/leaguesetup'
'/ownerinfo?leagueId={league_id}&seasonId={season_id}',
'matchup': 'http://games.espn.go.com/ffl/scoreboard?'
'leagueId={league_id}&matchupPeriodId={week}'
'&seasonId={season_id}',
'roster': 'http://games.espn.go.com/ffl/playertable/prebuilt/'
'manageroster?leagueId={league_id}&teamId={team_id}'
'&seasonId={season_id}&scoringPeriodId={week}'
'&view=overview&context=clubhouse'
'&ajaxPath=playertable/prebuilt/manageroster'
'&managingIr=false&droppingPlayers=false&asLM=false',
},
}
def pp(soup):
print(soup.prettify().encode('utf-8'))
def eprint(*args, **kwargs):
kwargs['file'] = sys.stderr
args = ['[nflfan]'] + list(args)
print(*args, **kwargs)
def player_search(db, full_name, team=None, position=None):
"""
A thin wrapper around `nfldb.player_search` that tries searching
with `team` or `position` when given, but if no results are found,
then this returns the results of a search with just the full name.
This allows for a slightly out-of-date database to still provide
a match while also disambiguating players with the same name.
"""
if position not in nfldb.Enums.player_pos:
position = None
p, _ = nfldb.player_search(db, full_name, team=team, position=position)
if p is None and position is not None:
p, _ = nfldb.player_search(db, full_name, team=team, position=None)
if p is None and team is not None:
p, _ = nfldb.player_search(db, full_name, team=None, position=position)
if p is None and team is not None and position is not None:
p, _ = nfldb.player_search(db, full_name, team=None, position=None)
return p
class League (namedtuple('League',
'season phase ident prov_name name scoring conf')):
__pdoc__['League.season'] = \
"""The year of the NFL season for this league."""
__pdoc__['League.phase'] = \
"""The phase of the season: preseason, regular or post."""
__pdoc__['League.ident'] = \
"""
A unique identifier for this league. The type and format of
this value is provider dependent.
"""
__pdoc__['League.prov_name'] = \
"""The name of the provider for this league."""
__pdoc__['League.name'] = \
"""The name of this league from the configuration."""
__pdoc__['League.scoring'] = \
"""The `nflfan.ScoreSchema` for this league."""
__pdoc__['League.conf'] = \
"""
A dictionary of configuration settings. The keys and values in
this dictionary are provider dependent.
"""
def __init__(self, *args):
super(League, self).__init__(*args)
self._cache = {}
@property
def full_name(self):
return '%s.%s' % (self.prov_name, self.name)
def is_me(self, obj):
if not self.conf.get('me', None):
return False
if isinstance(obj, Roster):
return self.is_me(obj.owner)
elif isinstance(obj, Matchup):
return self.is_me(obj.owner1) or self.is_me(obj.owner2)
else:
return self.conf['me'].lower() in obj.name.lower()
def me(self, objs):
for obj in objs:
if self.is_me(obj):
return obj
return None
def owners(self, week):
return self._cached(week, 'owners')
def owner(self, week, ident):
for o in self.owners(week):
if o.ident == ident:
return o
return None
def matchups(self, week):
return self._cached(week, 'matchups')
def matchup(self, week, ident):
for m in self.matchups(week):
if m.owner1 is None or m.owner2 is None:
continue
if m.owner1.ident == ident or m.owner2.ident == ident:
return m
return None
def rosters(self, week):
return self._cached(week, 'rosters')
def roster(self, week, ident):
for r in self.rosters(week):
if r.owner.ident == ident:
return r
return None
def cache_path(self, week):
return os.path.join(nflfan.config.cache_path(),
str(self.season), str(self.phase), str(week),
self.full_name + '.json')
def _cached(self, week, key):
if week not in self._cache:
self._load(week)
return self._cache[week][key]
def _load(self, week):
raw = None
fp = self.cache_path(week)
try:
with open(fp) as f:
raw = json.load(f)
except IOError:
raise IOError(
"No cached data for week %d in %s could be found at %s\n"
"Have you run `nflfan-update --week %d` yet?"
% (week, self.full_name, fp, week))
d = {'owners': [], 'matchups': [], 'rosters': []}
for owner in raw['owners']:
d['owners'].append(Owner._make(owner))
for matchup in raw['matchups']:
o1 = None if matchup[0] is None else Owner._make(matchup[0])
o2 = None if matchup[1] is None else Owner._make(matchup[1])
d['matchups'].append(Matchup(o1, o2))
for roster in raw['rosters']:
o = Owner._make(roster[0])
r = Roster(o, roster[1], roster[2], [])
for rp in roster[3]:
r.players.append(RosterPlayer._make(rp))
d['rosters'].append(r)
self._cache[week] = d
def __str__(self):
return self.full_name
class Matchup (namedtuple('Matchup', 'owner1 owner2')):
__pdoc__['Matchup.owner1'] = \
"""
One of the two teams in this matchup represented as an
`nflfan.Owner` object.
"""
__pdoc__['Matchup.owner2'] = \
"""
One of the two teams in this matchup represented as an
`nflfan.Owner` object.
"""
def other(self, ident):
"""
        Given an identifier for one of the owners in this matchup,
return the `nflfan.Owner` of the other owner.
"""
assert ident in (self.owner1.ident, self.owner2.ident)
if ident == self.owner1.ident:
return self.owner2
else:
return self.owner1
def __str__(self):
return '%s vs. %s' % (self.owner1, self.owner2)
class Owner (namedtuple('Owner', 'ident name')):
__pdoc__['Owner.ident'] = \
"""
A unique identifier corresponding to this owner. The type
of this value is provider-dependent.
"""
__pdoc__['Owner.name'] = \
"""A string representing the name of this owner."""
def __str__(self):
return self.name
class Roster (namedtuple('Roster', 'owner season week players')):
__pdoc__['Roster.owner'] = \
"""
A `nflfan.Owner` object corresponding to the owner of this
roster.
"""
__pdoc__['Roster.players'] = \
"""
A list of `nflfan.RosterPlayer` objects corresponding to the
set of players on this roster.
"""
def new_player(self, pos, team, bench, player_id):
"""
A convenience method for creating a new `nflfan.RosterPlayer`
given the current roster.
"""
return RosterPlayer(pos, team, bench, self.season, self.week,
None, 0.0, None, player_id)
@property
def active(self):
return filter(lambda rp: not rp.bench, self.players)
@property
def benched(self):
return filter(lambda rp: rp.bench, self.players)
@property
def points(self):
"""Returns the total number of points for non-benched players."""
return sum(p.points for p in self.players if not p.bench)
def __str__(self):
s = []
for rp in self.players:
s.append(str(rp))
return '\n'.join(s)
class RosterPlayer (
namedtuple('RosterPlayer',
'position team bench season week '
'game points player player_id')):
__pdoc__['RosterPlayer.position'] = \
"""
A string corresponding to the position of the roster spot
occupied by this player. The possible values of this string are
provider dependent.
"""
__pdoc__['RosterPlayer.team'] = \
"""
A team abbreviation that this player belongs to. It must be a
valid nfldb team abbreviation and *cannot* be `UNK`.
"""
__pdoc__['RosterPlayer.bench'] = \
"""A boolean indicating whether this is a bench position or not."""
__pdoc__['RosterPlayer.season'] = \
"""The year of the corresponding NFL season."""
__pdoc__['RosterPlayer.week'] = \
"""The week number in which this roster was set."""
__pdoc__['RosterPlayer.game'] = \
"""
The `nfldb.Game` object for the game that this player played
in. If this roster position corresponds to a bye week, then
this attribute is set to `None`.
"""
__pdoc__['RosterPlayer.points'] = \
"""The total fantasy points for this roster player."""
__pdoc__['RosterPlayer.player'] = \
"""
A `nfldb.Player` object corresponding to this roster player.
This attribute is `None` by default, and is always `None` for
roster players corresponding to entire teams (e.g., defense).
"""
__pdoc__['RosterPlayer.player_id'] = \
"""
A player id string corresponding to the player in this roster
position and a player in nfldb. This may be `None` when the
roster player corresponds to an entire team. (e.g., A defense.)
"""
@property
def is_empty(self):
return self.team is None and self.player_id is None
@property
def is_defense(self):
return self.team is not None and self.player_id is None
@property
def is_player(self):
return self.player_id is not None
@property
def id(self):
if self.is_empty:
return 'Empty'
elif self.is_defense:
return self.team
else:
return self.player_id
@property
def name(self):
return self.id if not self.player else self.player.full_name
def __str__(self):
if self.game is not None and self.game.is_playing:
playing = '*'
else:
playing = ' '
return '%-6s %-4s %-20s %s%0.2f' \
% (self.position, self.team, self.name, playing, self.points)
class Provider (object):
"""
This class describes the interface that each fantasy football
provider must implement so that it can work with nflfan. In other
words, this is an abstract base class that should **not** be
instantiated directly.
All public members of this class must also be defined in each
provider implementation, including the class variables.
"""
provider_name = None
"""The name of the provider used in the configuration file."""
conf_required = ['scoring', 'league_name', 'season', 'phase', 'league_id']
"""A list of fields required for every provider."""
conf_optional = ['me']
"""A list of fields that are optional for every provider."""
def __init__(self, lg):
self._lg = lg
self._session = requests.Session()
self._session.headers.update(getattr(self, '_headers', {}))
def owners(self):
"""Returns a list of `nflfan.Owner` objects."""
assert False, 'subclass responsibility'
def matchups(self, week):
"""
Given a week number, this returns a list of `nflfan.Matchup`
objects describing the head-to-head matchups for `week`.
"""
assert False, 'subclass responsibility'
def roster(self, player_search, owner, week):
"""
Given a `nflfan.Owner` and a week number, this returns a
`nflfan.Roster` object. The `nflfan.Roster` contains a list of
`nfldb.Player` objects and their corresponding position on the
roster.
`player_search` should be a function that takes a full
player name and returns the closest matching player as a
`nfldb.Player` object. It should also optionally take keyword
arguments `team` and `position` that allow for extra filtering.
Note that the roster position is a string but the set of
possible values is provider dependent. It is used for display
purposes only.
"""
assert False, 'subclass responsibility'
def save(self, fp, player_search, week):
"""
Writes a JSON encoding of all the owners, matchups and rosters
for the given week to a file at `fp`.
`player_search` should be a function that takes a full
player name and returns the closest matching player as a
`nfldb.Player` object. It should also optionally take keyword
arguments `team` and `position` that allow for extra filtering.
"""
d = {
'owners': self.owners(),
'matchups': self.matchups(week),
}
# I'm hoping this doesn't hurt custom providers that don't need
# to do IO to fetch a roster.
def roster(owner):
return self.roster(player_search, owner, week)
# pool = multiprocessing.pool.ThreadPool(3)
# d['rosters'] = pool.map(roster, d['owners'])
d['rosters'] = map(roster, d['owners'])
try:
os.makedirs(os.path.dirname(fp))
except OSError:
pass
json.dump(d, open(fp, 'w+'))
def _request(self, url):
eprint('download %s' % url)
r = self._session.get(url)
soup = BeautifulSoup(r.text, 'html.parser')
if self._login_form(soup):
self._login()
r = self._session.get(url)
soup = BeautifulSoup(r.text, 'html.parser')
if self._login_form(soup):
raise IOError("Authentication failure.")
return r
def _login(self):
assert self._login_url is not None
soup = BeautifulSoup(self._session.get(self._login_url).text,
'html.parser')
if not self._login_form(soup):
# Already logged in!
return
form = self._login_form(soup)
params = self._login_params(soup)
for inp in soup.select('#hiddens input[type="hidden"]'):
params[inp['name']] = inp['value']
r = self._session.post('https://login.yahoo.com' + form['action'],
params=params)
return BeautifulSoup(r.text, 'html.parser')
def _login_params(self):
assert False, 'subclass responsibility'
def _login_form(self, soup):
assert False, 'subclass responsibility'
def __str__(self):
return self.__class__.provider_name
class Yahoo (Provider):
provider_name = 'yahoo'
conf_required = []
conf_optional = ['username', 'password']
_headers = {'User-Agent': _user_agent}
_login_url = 'https://login.yahoo.com/config/login'
def __init__(self, lg):
super(Yahoo, self).__init__(lg)
_, _, self._league_num = self._lg.ident.split('.')
def owners(self):
match_owner_link = re.compile('team-[0-9]+-name')
url = _urls['yahoo']['owner'] % self._league_num
soup = BeautifulSoup(self._request(url).text, 'html.parser')
owners = []
for link in soup.find_all(id=match_owner_link):
ident = self._owner_id_from_url(link['href'])
owners.append(Owner(ident, link.text.strip()))
return owners
def matchups(self, week):
mk_owner = lambda div: Owner(owner_id(div.a['href']), div.text.strip())
owner_id = self._owner_id_from_url
url = _urls['yahoo']['matchup'] % (self._league_num, week)
rjson = self._request(url).json()
soup = BeautifulSoup(rjson['content'], 'html.parser')
matchups = []
for matchup in soup.find('ul').children:
pair = list(matchup.find_all('div', class_='Fz-sm'))
if len(pair) == 1:
matchups.append(Matchup(mk_owner(pair[0]), None))
else:
matchups.append(Matchup(mk_owner(pair[0]), mk_owner(pair[1])))
return matchups
def roster(self, player_search, owner, week):
def to_pos(row):
return row.td.find(class_='pos-label')['data-pos'].strip().upper()
def to_name(row):
return row.find(class_='ysf-player-name').a.text.strip()
def to_team(row):
team_pos = row.find(class_='ysf-player-name').span.text.strip()
return nfldb.standard_team(re.search('^\S+', team_pos).group(0))
def rplayer(r, name, team, pos):
bench = pos == 'BN'
if name is None and team is None:
return r.new_player(pos, None, bench, None)
elif nfldb.standard_team(name) != 'UNK':
return r.new_player(pos, team, bench, None)
else:
player = player_search(name, team=team, position=pos)
return r.new_player(pos, team, bench, player.player_id)
match_table_id = re.compile('^statTable[0-9]+$')
url = _urls['yahoo']['roster'] % (self._league_num, owner.ident, week)
soup = BeautifulSoup(self._request(url).text, 'html.parser')
roster = Roster(owner, self._lg.season, week, [])
for table in soup.find_all(id=match_table_id):
for row in table.tbody.find_all('tr', recursive=False):
pos = to_pos(row)
try:
team, name = to_team(row), to_name(row)
roster.players.append(rplayer(roster, name, team, pos))
except AttributeError:
roster.players.append(rplayer(roster, None, None, pos))
return roster
def _owner_id_from_url(self, url):
return re.search('%s/([0-9]+)' % self._league_num, url).group(1)
def _login(self):
soup = super(Yahoo, self)._login()
if self._login_form(soup):
err_div = soup.find(id='mbr-login-error')
err_msg = 'Unknown error.'
if err_div:
err_msg = err_div.text.strip()
raise IOError('Login failed: %s' % err_msg)
def _login_params(self, soup):
return {
'username': self._lg.conf.get('username', ''),
'passwd': self._lg.conf.get('password', ''),
'signin': '',
# '.persistent': 'y',
'countrycode': '1',
# '_crumb': '8cSELfo475z',
# '_ts': str(int(time.time())),
# '_format': '',
# '_uuid': 'Q9JF85iYg9ax',
# '_seqid': '2',
# 'otp_channel': '',
}
def _login_form(self, soup):
return soup.find('form', id='mbr-login-form')
class ESPN (Provider):
provider_name = 'espn'
conf_required = []
conf_optional = ['username', 'password']
_headers = {'User-Agent': _user_agent}
_login_url = 'http://games.espn.go.com/ffl/signin?_=_'
def owners(self):
url = _urls['espn']['owner'].format(
league_id=self._lg.ident, season_id=self._lg.season)
soup = BeautifulSoup(self._request(url).text, 'html.parser')
owners = []
for td in soup.select('tr.ownerRow td.teamName'):
ident = self._owner_id_from_url(td.a['href'])
owners.append(Owner(ident, td.text.strip()))
return owners
def matchups(self, week):
owner_id = self._owner_id_from_url
url = _urls['espn']['matchup'].format(
league_id=self._lg.ident, season_id=self._lg.season, week=week)
soup = BeautifulSoup(self._request(url).text, 'html.parser')
matchupDiv = soup.find(id='scoreboardMatchups')
matchups = []
for table in matchupDiv.select('table.matchup'):
t1, t2 = list(table.find_all(class_='name'))
id1, id2 = owner_id(t1.a['href']), owner_id(t2.a['href'])
name1, name2 = t1.a.text.strip(), t2.a.text.strip()
o1, o2 = Owner(id1, name1), Owner(id2, name2)
matchups.append(Matchup(o1, o2))
return matchups
def roster(self, player_search, owner, week):
def to_pos(row):
pos = row.find(class_='playerSlot').text.strip().upper()
if pos == 'BENCH':
return 'BN'
return pos
def to_name(row):
name = row.find(class_='playertablePlayerName').a.text.strip()
# If this is the defense, apparently 'D/ST' is included in
# the name. Wtf?
return re.sub('\s+D/ST$', '', name)
def to_team(row):
tpos = row.find(class_='playertablePlayerName').a.next_sibling
tpos = tpos.strip(' \r\n\t*,|').upper()
# This is a little weird because the team name seems to run
# in with the position. Perhaps a weird encoding quirk?
if len(tpos) < 2:
return 'UNK'
elif len(tpos) == 2:
return nfldb.standard_team(tpos)
else:
team = nfldb.standard_team(tpos[0:3])
if team == 'UNK':
team = nfldb.standard_team(tpos[0:2])
return team
def rplayer(r, name, team, pos):
bench = pos == 'BN'
name_team = nfldb.standard_team(name)
if name is None and team is None:
return r.new_player(pos, None, bench, None)
elif name_team != 'UNK':
return r.new_player(pos, name_team, bench, None)
else:
player = player_search(name, team=team, position=pos)
return r.new_player(pos, team, bench, player.player_id)
url = _urls['espn']['roster'].format(
league_id=self._lg.ident, season_id=self._lg.season, week=week,
team_id=owner.ident)
soup = BeautifulSoup(self._request(url).text, 'html.parser')
roster = Roster(owner, self._lg.season, week, [])
for tr in soup.select('tr.pncPlayerRow'):
if tr.get('id', '') == 'pncEmptyRow':
continue
pos = to_pos(tr)
try:
team, name = to_team(tr), to_name(tr)
roster.players.append(rplayer(roster, name, team, pos))
except AttributeError:
roster.players.append(rplayer(roster, None, None, pos))
return roster
def _owner_id_from_url(self, url):
return re.search('teamId=([0-9]+)', url).group(1)
def _login(self):
soup = super(ESPN, self)._login()
if self._login_form(soup):
err_msg = []
for msg in soup.find_all('font', color='#ff0000'):
err_msg.append(msg.text.strip())
err_msg = '\n'.join(err_msg) if err_msg else 'Unknown error.'
raise IOError('Login failed: %s' % err_msg)
def _login_params(self):
return {
'username': self._lg.conf.get('username', ''),
'password': self._lg.conf.get('password', ''),
'submit': 'Sign In',
}
def _login_form(self, soup):
return soup.find('form', attrs={'name': 'loginForm'})
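# Hedged usage sketch (added for illustration): the entries in _urls are plain
# format strings, so they can be rendered with made-up league/team/week values
# without touching the network.
def _example_espn_roster_url():
    return _urls['espn']['roster'].format(
        league_id='123456', team_id='7', season_id=2015, week=3)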
| {
"repo_name": "BurntSushi/nflfan",
"path": "nflfan/provider.py",
"copies": "1",
"size": "24616",
"license": "unlicense",
"hash": 1224262221576702000,
"line_mean": 33.1888888889,
"line_max": 121,
"alpha_frac": 0.562520312,
"autogenerated": false,
"ratio": 3.7173059498640892,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4779826261864089,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from collections import namedtuple, OrderedDict
import csv
import datetime as dt
from io import open
import os.path
import pickle
import re
import sys
import numpy as np
import pandas as pd
from caar.pandas_tseries_tools import _guess_datetime_format
from caar.configparser_read import SENSOR_FIELDS, \
GEOSPATIAL_FIELDS, SENSOR_ZIP_CODE, SENSOR_DEVICE_ID, \
POSTAL_FILE_ZIP, POSTAL_TWO_LETTER_STATE, SENSOR_LOCATION_ID, \
SENSOR_ID_FIELD, UNIQUE_CYCLE_FIELD_INDEX, UNIQUE_GEOSPATIAL_FIELD, \
CYCLE_TYPE_INDEX, CYCLE_START_INDEX, CYCLE_END_TIME_INDEX, \
SENSORS_LOG_DATE_INDEX, SENSORS_DATA_INDEX, SENSOR_ID_INDEX, \
GEOSPATIAL_LOG_DATE_INDEX, GEOSPATIAL_OBSERVATION_INDEX, CYCLE_FIELDS, \
CYCLE_ID_INDEX, GEOSPATIAL_ID_INDEX
from future import standard_library
standard_library.install_aliases()
Cycle = namedtuple('Cycle', ['device_id', 'cycle_mode', 'start_time'])
Sensor = namedtuple('Sensor', ['sensor_id', 'timestamp'])
Geospatial = namedtuple('Geospatial', ['location_id', 'timestamp'])
def dict_from_file(raw_file, cycle=None, states=None,
sensors_file=None, postal_file=None, auto=None,
id_col_heading=None, cycle_col_heading=None, encoding='UTF-8',
delimiter=None, quote=None, cols_to_ignore=None, meta=False):
"""Read delimited text file and create dict of dicts. One dict within the dict has the key 'cols_meta' and contains metadata. The other has the key 'records'. The records keys are named 2-tuples containing numeric IDs and time stamps (and cycle mode if a cycle mode is chosen with the argument 'cycle=', for cycling data). The values are either single values (floats, ints or strings) or tuples of these types.
See the example .csv data files at https://github.com/nickpowersys/caar.
Example sensor cycle file column headings: DeviceId, CycleType, StartTime, EndTime.
Example sensor file column headings: SensorId, TimeStamp, Degrees.
Example outside temperature file column headings LocationId, TimeStamp, Degrees.
Common delimited text file formats including commas, tabs, pipes and spaces are detected in
that order within the data rows (the header has its own delimiter detection and is handled separately,
automatically) and the first delimiter detected is used. In all cases, rows are only used if the
number of values match the number of column headings in the first row.
Each input file is expected to have (at least) columns representing ID's, time stamps (or
starting and ending time stamps for cycles), and (if not cycles) corresponding observations.
To use the automatic column detection functionality, use the keyword argument 'auto' and
assign it one of the values: 'cycles', 'sensors', or 'geospatial'.
The ID's should contain both letters and digits in some combination (leading zeroes are also
allowed in place of letters). Having the string 'id', 'Id' or 'ID' will then cause a column
to be the ID index within the combined ID-time stamp index for a given input file. If there
is no such heading, the leftmost column with alphanumeric strings (for example, 'T12' or
'0123') will be taken as the ID.
The output can be filtered on records from a state or set of states by specifying a
comma-delimited string containing state abbreviations. Otherwise, all available records
will be in the output.
If a state or states are specified, a sensors metadata file and postal
code file must be specified in the arguments and have the same location ID columns
and ZipCode/PostalCode column headings in the same left-to-right order as in the examples.
For the other columns, dummy values may be used if there is no actual data.
Args:
raw_file (str): The input file.
cycle (Optional[str]): The type of cycling operation that will be included in the output. For example, possible values that may be in the data file are 'Cool' or 'Heat'. If no specific value is specified as an argument, all operating modes will be included.
states (Optional[str]): One or more comma-separated, two-letter state abbreviations.
sensors_file (Optional[str]): Path of metadata file for sensors. Required if there is a states argument.
postal_file (Optional[str]): Metadata file for zip codes, with zip codes, their state, and other geographic information. Required if there is a states argument.
auto (Optional[Boolean]): {'cycles', 'sensors', 'geospatial', None} If one of the data types is specified, the function will detect which columns contain IDs, time stamps and values of interest automatically. If None (default), the order of columns in the delimited file and the config.ini file should match.
id_col_heading (Optional[str]): Indicates the heading in the header for the ID column.
cycle_col_heading (Optional[str]): Indicates the heading in the header for the cycle mode column.
cols_to_ignore (Optional[iterable of [str] or [int]]): Column headings or 0-based column indexes that should be left out of the output.
encoding (Optional[str]): Encoding of the raw data file. Default: 'UTF-8'.
        delimiter (Optional[str]): Character to be used as row delimiter. Default is None, but commas, tabs, pipes and spaces are automatically detected (in that priority order) if no delimiter is specified.
quote (Optional[str]): Characters surrounding data fields. Default is none, but double and single quotes surrounding data fields are automatically detected and removed if they are present in the data rows. If any other character is specified in the keyword argument, and it surrounds data in any column, it will be removed instead.
meta (Optional[bool]): An alternative way to return metadata about columns, besides the detect_columns() function. To use it, meta must be True, and a dict of metadata will be returned instead of a dict of records.
Returns:
clean_dict (dict): Dict.
"""
kwargs = dict([('states', states), ('sensors_file', sensors_file),
('cycle', cycle), ('postal_file', postal_file),
('auto', auto), ('delimiter', delimiter), ('quote', quote),
('meta', meta), ('id_col_heading', id_col_heading),
('encoding', encoding)])
if isinstance(meta, bool):
pass
else:
raise ValueError('meta argument must be either False or True.')
    if states:
        if not (kwargs.get('sensors_file') and kwargs.get('postal_file')):
            # Both metadata files are required when filtering by state.
            _missing_sensors_or_postal_error_message()
header_kwargs = dict([('encoding', encoding), ('delimiter', delimiter),
('id_col_heading', id_col_heading), ('quote', quote),
('auto', auto), ('cycle', cycle)])
header, id_index = _header_and_id_col_if_heading_or_preconfig(raw_file,
**header_kwargs)
skwargs = dict([('encoding', encoding), ('delimiter', delimiter),
('quote', quote), ('cycle', cycle),
('id_col', id_index), ('auto', auto),
('cols_to_ignore', cols_to_ignore),
('cycle_col_heading', cycle_col_heading)])
# If delimiter and/or quote were not specified as kwargs,
# they will be set by call to _analyze_all_columns()
cols_meta, delim, quote = _analyze_all_columns(raw_file, header,
**skwargs)
if meta:
return cols_meta
else:
for k, v in [('cols_meta', cols_meta), ('delimiter', delim),
('quote', quote), ('header', header)]:
kwargs[k] = v
records = _dict_from_lines_of_text(raw_file, **kwargs)
for col, col_meta in cols_meta.items():
            if col_meta['type'] == 'numeric_commas':
                # Normalize comma-grouped numeric columns to plain ints.
                col_meta['type'] = 'ints'
container = {'cols_meta': cols_meta, 'records': records}
return container
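# Hedged usage sketch (added for illustration): 'sensors.csv' is a hypothetical
# delimited file laid out like the sensor examples described in the docstring;
# auto='sensors' turns on automatic column detection.
def _example_dict_from_file():
    container = dict_from_file('sensors.csv', auto='sensors')
    return container['cols_meta'], container['records']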
def columns_summary(raw_file, cycle=None, states=None,
sensors_file=None, postal_file=None, auto=None,
encoding='UTF-8', delimiter=None, quote=None,
id_col_heading=None, cycle_col_heading=None,
cols_to_ignore=None):
"""Returns pandas DataFrame that summarizes the columns detected in the
raw file: the headings, the positions, and types that are consistent with
the actual data (ints, floats, alphabetic ('alpha_only'), time, and zip
    codes). This function helps the user to avoid having to open a large
text file in an editor.
    This set of columns will appear in the dict produced by dict_from_file() or pickle_from_file() with the corresponding keyword arguments ('auto' is required, and must be a value other than None).
Args:
raw_file (str): The input file.
cycle (Optional[str]): The type of cycle that will be in the output. For example, example values that may be in the data file are 'Cool' and/or 'Heat'. If no specific value is specified as an argument, all modes will be in the output.
states (Optional[str]): One or more comma-separated, two-letter state abbreviations.
sensors_file (Optional[str]): Path of metadata file for sensors. Required if there is a states argument.
postal_file (Optional[str]): Metadata file for postal codes. Required if there is a states argument.
        auto (Optional[str]): {'cycles', 'sensors', 'geospatial', None} If one of the data types is specified, the function will detect which columns contain IDs, time stamps and values of interest automatically. If None (default), the order of columns in the delimited file and the config.ini file should match.
id_col_heading (Optional[str]): Indicates the heading in the header for the ID column.
cycle_col_heading (Optional[str]): Indicates the heading in the header for the cycle column.
cols_to_ignore (Optional[iterable of [str] or [int]]): Column headings or 0-based column indexes that should be left out of the output.
encoding (Optional[str]): Encoding of the raw data file. Default: 'UTF-8'.
        delimiter (Optional[str]): Character used to delimit columns (fields). Default is None, but commas, tabs, pipes and spaces are automatically detected (in that priority order) if no delimiter is specified.
        quote (Optional[str]): Character surrounding data fields. Default is None, but double and single quotes surrounding data fields are automatically detected and removed if they are present in the data rows. If any other character is specified in the keyword argument, and it surrounds data in any column, it will be removed instead.
Returns:
pandas DataFrame
"""
columns = detect_columns(raw_file, cycle=cycle, states=states,
sensors_file=sensors_file,
postal_file=postal_file, auto=auto,
encoding=encoding, delimiter=delimiter,
quote=quote, id_col_heading=id_col_heading,
cycle_col_heading=cycle_col_heading,
cols_to_ignore=cols_to_ignore)
df = pd.DataFrame(columns)
return df
def detect_columns(raw_file, cycle=None, states=None,
sensors_file=None, postal_file=None, auto=None,
encoding='UTF-8', delimiter=None, quote=None,
id_col_heading=None, cycle_col_heading=None,
cols_to_ignore=None):
"""Returns dict with columns that will be in dict based on dict_from_file() or pickle_from_file() and corresponding keyword arguments ('auto' is required, and must be a value other than None).
Args:
raw_file (str): The input file.
        cycle (Optional[str]): The type of cycle that will be in the output. For example, values that may be in the data file are 'Cool' and/or 'Heat'. If no specific value is specified as an argument, all modes will be in the output.
states (Optional[str]): One or more comma-separated, two-letter state abbreviations.
sensors_file (Optional[str]): Path of metadata file for sensors. Required if there is a states argument.
postal_file (Optional[str]): Metadata file for postal codes. Required if there is a states argument.
        auto (Optional[str]): {'cycles', 'sensors', 'geospatial', None} If one of the data types is specified, the function will detect which columns contain IDs, time stamps and values of interest automatically. If None (default), the order of columns in the delimited file and the config.ini file should match.
id_col_heading (Optional[str]): Indicates the heading in the header for the ID column.
cycle_col_heading (Optional[str]): Indicates the heading in the header for the cycle column.
cols_to_ignore (Optional[iterable of [str] or [int]]): Column headings or 0-based column indexes that should be left out of the output.
encoding (Optional[str]): Encoding of the raw data file. Default: 'UTF-8'.
        delimiter (Optional[str]): Character used to delimit columns (fields). Default is None, but commas, tabs, pipes and spaces are automatically detected (in that priority order) if no delimiter is specified.
        quote (Optional[str]): Character surrounding data fields. Default is None, but double and single quotes surrounding data fields are automatically detected and removed if they are present in the data rows. If any other character is specified in the keyword argument, and it surrounds data in any column, it will be removed instead.
Returns:
        column_dict (dict): Dict in which keys are one of: 'id', 'start_time', 'end_time', 'cycle' (the latter three are for cycles data only), 'time', or the headings of other columns found in the file. The values are dicts holding the column heading, detected type, and 0-based position.
"""
kwargs = dict([('meta', True), ('cycle', cycle), ('states', states),
('sensors_file', sensors_file),
('postal_file', postal_file), ('auto', auto),
('encoding', encoding), ('delimiter', delimiter),
('quote', quote), ('id_col_heading', id_col_heading),
('cycle_col_heading', cycle_col_heading),
('cols_to_ignore', cols_to_ignore)])
col_meta = dict_from_file(raw_file, **kwargs)
sorted_meta = _sort_meta_in_col_order(col_meta)
return sorted_meta
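# Sketch of the metadata returned by detect_columns for a sensors file with
# headings SensorId, TimeStamp, Degrees (values shown are illustrative only;
# the result is an OrderedDict ordered by column position):
#
#     detect_columns('sensors.csv', auto='sensors')
#     # {'id':      {'heading': 'SensorId',  'type': 'alphanumeric', 'position': 0},
#     #  'time':    {'heading': 'TimeStamp', 'type': 'time',         'position': 1},
#     #  'Degrees': {'heading': 'Degrees',   'type': 'floats',       'position': 2}}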
def _sort_meta_in_col_order(meta):
sorted_meta = OrderedDict()
for i in range(len(meta)):
for k, v in meta.items():
if v['position'] == i:
sorted_meta[k] = v
return sorted_meta
def sensor_text_to_binary(raw_file, picklepath=None, states=None,
sensors_file=None, postal_file=None,
auto='sensors', id_col_heading=None,
cols_to_ignore=None, encoding='UTF-8',
delimiter=None, quote=None):
"""Read delimited text file and create binary pickle file containing a dict of records. The keys are named tuples containing numeric IDs (strings) and time stamps.
See the example .csv data files at https://github.com/nickpowersys/caar.
Example sensor cycle file column headings: DeviceId, CycleType, StartTime, EndTime.
Example sensors file column headings: SensorId, TimeStamp, Degrees.
    Example geospatial data file column headings: LocationId, TimeStamp, Degrees.
    Common delimiters (commas, tabs, pipes and spaces) are detected in
    that order within the data rows (the header has its own delimiter detection and is handled separately,
    automatically) and the first delimiter detected is used. In all cases, rows
    are only used if the number of values matches the number of column headings in the first row.
    Each input file is expected to have (at least) columns representing IDs, time stamps (or
    starting and ending time stamps for cycles), and (if not cycles) corresponding observations.
    To use the automatic column detection functionality, use the keyword argument 'auto' and
    assign it one of the values: 'cycles', 'sensors', or 'geospatial'.
    The IDs should contain both letters and digits in some combination (leading zeroes are also
    allowed in place of letters). A column whose heading contains the string 'id', 'Id' or 'ID' will then
    be used as the ID index within the combined ID-time stamp index for a given input file. If there
    is no such heading, the leftmost column with alphanumeric strings (for example, 'T12' or
    '0123') will be taken as the ID.
The output can be filtered on records from a state or set of states by specifying a
comma-delimited string containing state abbreviations. Otherwise, all available records
will be in the output.
If a state or states are specified, a sensors metadata file and postal
code file must be specified in the arguments and have the same location ID columns
and ZipCode/PostalCode column headings in the same left-to-right order as in the examples.
For the other columns, dummy values may be used if there is no actual data.
Args:
raw_file (str): The input file.
picklepath (str): The path of the desired pickle file. If it is not specified, a filename is generated automatically.
states (Optional[str]): One or more comma-separated, two-letter state abbreviations.
sensors_file (Optional[str]): Path of metadata file for sensors. Required if there is a states argument.
postal_file (Optional[str]): Metadata file for postal codes. Required if there is a states argument.
        auto (Optional[str]): {'sensors', None} If not None, the function will detect which columns contain IDs, time stamps and values of interest automatically. If None (default), the order and headings of columns in the delimited text file and the config.ini file should match.
id_col_heading (Optional[str]): Indicates the heading in the header for the ID column.
cols_to_ignore (Optional[iterable of [str] or [int]]): Column headings or 0-based column indexes that should be left out of the output.
encoding (Optional[str]): Encoding of the raw data file. Default: 'UTF-8'.
        delimiter (Optional[str]): Character used to delimit columns (fields). Default is None, but commas, tabs, pipes and spaces are automatically detected (in that priority order) if no delimiter is specified.
        quote (Optional[str]): Character surrounding data fields. Default is None, but double and single quotes surrounding data fields are automatically detected and removed if they are present in the data rows. If any other character is specified in the keyword argument, and it surrounds data in any column, it will be removed instead.
Returns:
picklepath (str): Path of output file.
"""
return pickle_from_file(raw_file, picklepath=picklepath, states=states,
sensors_file=sensors_file, postal_file=postal_file,
auto=auto, id_col_heading=id_col_heading,
cols_to_ignore=cols_to_ignore, encoding=encoding,
delimiter=delimiter, quote=quote)
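# Example call (hypothetical paths): writes a pickle of sensor records and
# returns its path.
#
#     path = sensor_text_to_binary('sensors.csv', picklepath='sensors.pickle')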
def cycles_text_to_binary(raw_file, picklepath=None, cycle=None, states=None,
sensors_file=None, postal_file=None, auto='cycles',
id_col_heading=None, cycle_col_heading=None,
cols_to_ignore=None, encoding='UTF-8', delimiter=None,
quote=None):
"""Read delimited text file and create binary pickle file containing a dict of records. The keys are named tuples containing numeric IDs (strings) and time stamps.
See the example .csv data files at https://github.com/nickpowersys/caar.
Example sensor cycle file column headings: DeviceId, CycleType, StartTime, EndTime.
Example sensors file column headings: SensorId, TimeStamp, Degrees.
    Example geospatial data file column headings: LocationId, TimeStamp, Degrees.
    Common delimiters (commas, tabs, pipes and spaces) are detected in
    that order within the data rows (the header has its own delimiter detection and is handled separately,
    automatically) and the first delimiter detected is used. In all cases, rows
    are only used if the number of values matches the number of column headings in the first row.
    Each input file is expected to have (at least) columns representing IDs, time stamps (or
    starting and ending time stamps for cycles), and (if not cycles) corresponding observations.
    To use the automatic column detection functionality, use the keyword argument 'auto' and
    assign it one of the values: 'cycles', 'sensors', or 'geospatial'.
    The IDs should contain both letters and digits in some combination (leading zeroes are also
    allowed in place of letters). A column whose heading contains the string 'id', 'Id' or 'ID' will then
    be used as the ID index within the combined ID-time stamp index for a given input file. If there
    is no such heading, the leftmost column with alphanumeric strings (for example, 'T12' or
    '0123') will be taken as the ID.
The output can be filtered on records from a state or set of states by specifying a
comma-delimited string containing state abbreviations. Otherwise, all available records
will be in the output.
If a state or states are specified, a sensors metadata file and postal
code file must be specified in the arguments and have the same location ID columns
and ZipCode/PostalCode column headings in the same left-to-right order as in the examples.
For the other columns, dummy values may be used if there is no actual data.
Args:
raw_file (str): The input file.
picklepath (str): The path of the desired pickle file. If it is not specified, a filename is generated automatically.
        cycle (Optional[str]): The type of cycle that will be in the output. For example, values in the data file may be either 'Cool' or 'Heat'. If left as None, all cycles will be in the output.
states (Optional[str]): One or more comma-separated, two-letter state abbreviations.
sensors_file (Optional[str]): Path of metadata file for sensors. Required if there is a states argument.
postal_file (Optional[str]): Metadata file for postal codes. Required if there is a states argument.
        auto (Optional[str]): {'cycles', 'sensors', 'geospatial', None} If one of the data types is specified, the function will detect which columns contain IDs, time stamps and values of interest automatically. If None (default), the order and headings of columns in the delimited text file and the config.ini file should match.
id_col_heading (Optional[str]): Indicates the heading in the header for the ID column.
cycle_col_heading (Optional[str]): Indicates the heading in the header for the cycle column.
cols_to_ignore (Optional[iterable of [str] or [int]]): Column headings or 0-based column indexes that should be left out of the output.
encoding (Optional[str]): Encoding of the raw data file. Default: 'UTF-8'.
        delimiter (Optional[str]): Character used to delimit columns (fields). Default is None, but commas, tabs, pipes and spaces are automatically detected (in that priority order) if no delimiter is specified.
        quote (Optional[str]): Character surrounding data fields. Default is None, but double and single quotes surrounding data fields are automatically detected and removed if they are present in the data rows. If any other character is specified in the keyword argument, and it surrounds data in any column, it will be removed instead.
Returns:
picklepath (str): Path of output file.
"""
return pickle_from_file(raw_file, picklepath=picklepath, cycle=cycle,
states=states, sensors_file=sensors_file,
postal_file=postal_file, auto=auto,
id_col_heading=id_col_heading,
cycle_col_heading=cycle_col_heading,
cols_to_ignore=cols_to_ignore, encoding=encoding,
delimiter=delimiter, quote=quote)
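# Example call with state filtering (hypothetical paths). When 'states' is given,
# both sensors_file and postal_file are required so records can be matched to
# locations in those states.
#
#     path = cycles_text_to_binary('cycles.csv', cycle='Cool', states='TX,IA',
#                                  sensors_file='sensors.csv',
#                                  postal_file='postal.csv')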
def geospatial_text_to_binary(raw_file, picklepath=None,
states=None, sensors_file=None,
postal_file=None, auto='geospatial',
id_col_heading=None, cols_to_ignore=None,
encoding='UTF-8', delimiter=None,
quote=None):
"""Read delimited text file and create binary pickle file containing a dict of records. The keys are named tuples containing numeric IDs (strings) and time stamps.
See the example .csv data files at https://github.com/nickpowersys/caar.
Example sensor cycle file column headings: DeviceId, CycleType, StartTime, EndTime.
Example sensors file column headings: SensorId, TimeStamp, Degrees.
    Example geospatial data file column headings: LocationId, TimeStamp, Degrees.
    Common delimiters (commas, tabs, pipes and spaces) are detected in
    that order within the data rows (the header has its own delimiter detection and is handled separately,
    automatically) and the first delimiter detected is used. In all cases, rows
    are only used if the number of values matches the number of column headings in the first row.
    Each input file is expected to have (at least) columns representing IDs, time stamps (or
    starting and ending time stamps for cycles), and (if not cycles) corresponding observations.
    To use the automatic column detection functionality, use the keyword argument 'auto' and
    assign it one of the values: 'cycles', 'sensors', or 'geospatial'.
    The IDs should contain both letters and digits in some combination (leading zeroes are also
    allowed in place of letters). A column whose heading contains the string 'id', 'Id' or 'ID' will then
    be used as the ID index within the combined ID-time stamp index for a given input file. If there
    is no such heading, the leftmost column with alphanumeric strings (for example, 'T12' or
    '0123') will be taken as the ID.
The output can be filtered on records from a state or set of states by specifying a
comma-delimited string containing state abbreviations. Otherwise, all available records
will be in the output.
If a state or states are specified, a sensors metadata file and postal
code file must be specified in the arguments and have the same location ID columns
and ZipCode/PostalCode column headings in the same left-to-right order as in the examples.
For the other columns, dummy values may be used if there is no actual data.
Args:
raw_file (str): The input file.
picklepath (str): The path of the desired pickle file. If it is not specified, a filename is generated automatically.
states (Optional[str]): One or more comma-separated, two-letter state abbreviations.
sensors_file (Optional[str]): Path of metadata file for sensors. Required if there is a states argument.
postal_file (Optional[str]): Metadata file for postal codes. Required if there is a states argument.
        auto (Optional[str]): {'cycles', 'sensors', 'geospatial', None} If one of the data types is specified, the function will detect which columns contain IDs, time stamps and values of interest automatically. If None (default), the order and headings of columns in the delimited text file and the config.ini file should match.
id_col_heading (Optional[str]): Indicates the heading in the header for the ID column.
cols_to_ignore (Optional[iterable of [str] or [int]]): Column headings or 0-based column indexes that should be left out of the output.
encoding (Optional[str]): Encoding of the raw data file. Default: 'UTF-8'.
        delimiter (Optional[str]): Character used to delimit columns (fields). Default is None, but commas, tabs, pipes and spaces are automatically detected (in that priority order) if no delimiter is specified.
        quote (Optional[str]): Character surrounding data fields. Default is None, but double and single quotes surrounding data fields are automatically detected and removed if they are present in the data rows. If any other character is specified in the keyword argument, and it surrounds data in any column, it will be removed instead.
Returns:
picklepath (str): Path of output file.
"""
return pickle_from_file(raw_file, picklepath=picklepath,
states=states, sensors_file=sensors_file,
postal_file=postal_file, auto=auto,
id_col_heading=id_col_heading,
cols_to_ignore=cols_to_ignore, encoding=encoding,
delimiter=delimiter, quote=quote)
def pickle_from_file(raw_file, picklepath=None, cycle=None, states=None,
sensors_file=None, postal_file=None, auto=None,
id_col_heading=None, cycle_col_heading=None,
cols_to_ignore=None, encoding='UTF-8', delimiter=None,
quote=None):
"""Read delimited text file and create binary pickle file containing a dict of records. The keys are named tuples containing numeric IDs (strings) and time stamps.
See the example .csv data files at https://github.com/nickpowersys/caar.
Example sensor cycle file column headings: DeviceId, CycleType, StartTime, EndTime.
Example sensors file column headings: SensorId, TimeStamp, Degrees.
    Example geospatial data file column headings: LocationId, TimeStamp, Degrees.
    Common delimiters (commas, tabs, pipes and spaces) are detected in
    that order within the data rows (the header has its own delimiter detection and is handled separately,
    automatically) and the first delimiter detected is used. In all cases, rows
    are only used if the number of values matches the number of column headings in the first row.
    Each input file is expected to have (at least) columns representing IDs, time stamps (or
    starting and ending time stamps for cycles), and (if not cycles) corresponding observations.
    To use the automatic column detection functionality, use the keyword argument 'auto' and
    assign it one of the values: 'cycles', 'sensors', or 'geospatial'.
    The IDs should contain both letters and digits in some combination (leading zeroes are also
    allowed in place of letters). A column whose heading contains the string 'id', 'Id' or 'ID' will then
    be used as the ID index within the combined ID-time stamp index for a given input file. If there
    is no such heading, the leftmost column with alphanumeric strings (for example, 'T12' or
    '0123') will be taken as the ID.
The output can be filtered on records from a state or set of states by specifying a
comma-delimited string containing state abbreviations. Otherwise, all available records
will be in the output.
If a state or states are specified, a sensors metadata file and postal
code file must be specified in the arguments and have the same location ID columns
and ZipCode/PostalCode column headings in the same left-to-right order as in the examples.
For the other columns, dummy values may be used if there is no actual data.
Args:
raw_file (str): The input file.
picklepath (str): The path of the desired pickle file. If it is not specified, a filename is generated automatically.
        cycle (Optional[str]): The type of cycle that will be in the output. For example, values in the data file may be either 'Cool' or 'Heat'. If left as None, all cycles will be in the output.
states (Optional[str]): One or more comma-separated, two-letter state abbreviations.
sensors_file (Optional[str]): Path of metadata file for sensors. Required if there is a states argument.
postal_file (Optional[str]): Metadata file for postal codes. Required if there is a states argument.
        auto (Optional[str]): {'cycles', 'sensors', 'geospatial', None} If one of the data types is specified, the function will detect which columns contain IDs, time stamps and values of interest automatically. If None (default), the order and headings of columns in the delimited text file and the config.ini file should match.
id_col_heading (Optional[str]): Indicates the heading in the header for the ID column.
cycle_col_heading (Optional[str]): Indicates the heading in the header for the cycle column.
cols_to_ignore (Optional[iterable of [str] or [int]]): Column headings or 0-based column indexes that should be left out of the output.
encoding (Optional[str]): Encoding of the raw data file. Default: 'UTF-8'.
        delimiter (Optional[str]): Character used to delimit columns (fields). Default is None, but commas, tabs, pipes and spaces are automatically detected (in that priority order) if no delimiter is specified.
        quote (Optional[str]): Character surrounding data fields. Default is None, but double and single quotes surrounding data fields are automatically detected and removed if they are present in the data rows. If any other character is specified in the keyword argument, and it surrounds data in any column, it will be removed instead.
Returns:
picklepath (str): Path of output file.
"""
    if states:
        try:
            assert sensors_file is not None and postal_file is not None
        except AssertionError:
            _missing_sensors_or_postal_error_message()
            return 0
kwargs = dict([('states', states), ('sensors_file', sensors_file),
('cycle', cycle), ('postal_file', postal_file),
('auto', auto), ('id_col_heading', id_col_heading),
('cycle_col_heading', cycle_col_heading),
('cols_to_ignore', cols_to_ignore), ('encoding', encoding),
                   ('delimiter', delimiter), ('quote', quote), ('meta', False)])
records_or_meta = dict_from_file(raw_file, **kwargs)
# Due to testing and the need of temporary directories,
# need to convert LocalPath to string
if picklepath is None:
picklepath = _pickle_filename(raw_file, states=states, auto=auto,
encoding=encoding)
if '2.7' in sys.version:
str_picklepath = unicode(picklepath)
else:
str_picklepath = str(picklepath)
with open(str_picklepath, 'wb') as fout:
pickle.dump(records_or_meta, fout, pickle.HIGHEST_PROTOCOL)
return str_picklepath
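# The pickle written by pickle_from_file can be read back with the standard
# library, using the path the function returned:
#
#     with open(picklepath, 'rb') as fin:
#         records = pickle.load(fin)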
def _pickle_filename(text_file, states=None, auto=None,
encoding='UTF-8', delimiter=None, quote=None):
"""Automatically generate file name based on state(s) and content.
Takes a string with two-letter abbreviations for states separated by
commas. If all states are desired, states_to_clean should be None.
"""
header, _ = _header_and_id_col_if_heading_or_preconfig(text_file,
encoding=encoding,
delimiter=delimiter,
quote=quote)
data_type = auto if auto else _data_type_matching_header(header)
if states:
states = states.split(',')
else:
states = ['all_states']
if '2.7' in sys.version:
py_version = 'py27'
filename = '_'.join(states + [data_type, py_version]) + '.pickle'
else:
filename = '_'.join(states + [data_type]) + '.pickle'
return filename
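# For example, states='TX' with auto='sensors' yields 'TX_sensors.pickle'
# ('TX_sensors_py27.pickle' under Python 2.7).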
def _dict_from_lines_of_text(raw_file, **kwargs):
"""Returns a tuple containing a dict of column meta-data and a dict of records
whose keys and values correspond to 1) operating status switching events, 2) sensor data
or 3) geospatial data. The keys of headers_functions are
tuples containing strings with the column headings from the raw text files.
"""
if kwargs.get('auto'):
# Detect columns containing ID, cool/heat mode and time automatically
data_func_map = {'sensors': _clean_sensors_auto_detect,
'cycles': _clean_cycles_auto_detect,
'geospatial': _clean_geospatial_auto_detect}
data = kwargs.get('auto')
        try:
            cleaning_function = data_func_map[data]
        except KeyError:
            print('The data type ' + data + ' is not recognized')
            raise
else:
# Use file definition from config.ini file to specify column headings
config_cols_func_map = {SENSOR_FIELDS: _clean_sensors,
CYCLE_FIELDS: _clean_cycles,
GEOSPATIAL_FIELDS: _clean_geospatial}
header = kwargs.get('header')
        try:
            cleaning_function = config_cols_func_map[header]
        except KeyError:
            print('Header not matched with headers in config.ini file.')
            raise
records = cleaning_function(raw_file, **kwargs)
return records
def _clean_cycles_auto_detect(raw_file, **kwargs):
args = ['header', 'delimiter', 'cols_meta', 'cycle', 'quote', 'encoding']
header, delimiter, cols_meta, cycle_mode, quote, encoding = (kwargs.get(k)
for k in args)
clean_args = [raw_file, header, delimiter, cols_meta]
thermos_ids = _sensors_ids_in_states(**kwargs)
clean_kwargs = {'cycle_mode': cycle_mode, 'thermos_ids': thermos_ids,
'quote': quote, 'encoding': encoding}
clean_records = _validate_cycle_records_add_to_dict_auto(*clean_args,
**clean_kwargs)
return clean_records
def _validate_cycle_records_add_to_dict_auto(raw_file, header, delimiter,
cols_meta, cycle_mode=None,
thermos_ids=None,
quote=None, encoding=None):
clean_records = {}
id_col, start_time_col = (cols_meta[k]['position'] for k in ['id',
'start_time'])
id_is_int = _id_is_int(cols_meta)
cycle_col = (cols_meta['cycle']['position'] if cols_meta.get('cycle')
else None)
dt_args = [raw_file, start_time_col, encoding, delimiter, quote, header]
datetime_format = _guess_datetime_format_from_first_record(*dt_args)
data_cols = _non_index_col_types(cols_meta, dt_format=datetime_format)
with open(raw_file, encoding=encoding) as lines:
_ = lines.readline()
for line in lines:
record = _record_from_line(line, delimiter, quote, header)
if record and _validate_cycles_auto_record(record, id_col,
ids=thermos_ids,
cycle_mode=cycle_mode,
cycle_col=cycle_col):
id_val = _id_val(record, id_col, id_is_int)
start_dt = _to_datetime(record[start_time_col],
dt_format=datetime_format)
# Cycle named tuple declaration is global, in order to ensure
# that named tuples using it can be pickled.
# Cycle = namedtuple('Cycle', ['device_id', 'cycle_mode',
# 'start_time'])
multiidcols = Cycle(device_id=id_val, cycle_mode=cycle_mode,
start_time=start_dt)
end_time_and_other_col_vals = _record_vals(record, data_cols)
clean_records[multiidcols] = end_time_and_other_col_vals
return clean_records
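# Shape of the dict built above (values are illustrative): each key is a Cycle
# named tuple and each value holds the cycle's end time plus any other data
# columns (a single remaining column is stored as a scalar, several as a tuple):
#
#     {Cycle(device_id='T12', cycle_mode='Cool',
#            start_time=dt.datetime(2014, 1, 1, 0, 0)):
#      dt.datetime(2014, 1, 1, 0, 15)}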
def _guess_datetime_format_from_first_record(raw_file, time_col, encoding,
delimiter, quote, header):
with open(raw_file, encoding=encoding) as lines:
_ = lines.readline()
for line in lines:
record = _record_from_line(line, delimiter, quote, header)
if record:
time = record[time_col]
datetime_format = _guess_datetime_format(time)
break
return datetime_format
def _validate_cycles_auto_record(record, id_col, ids=None, cycle_mode=None,
cycle_col=None):
"""Validate that record ID is in the set of IDs (if any specified), and
that the cycle type matches the specified value (if any has been specified).
"""
    cycle_val = record[cycle_col] if cycle_col is not None else None
    return all([_validate_cycle_mode(cycle_val, cycle_mode),
                _validate_id(record[id_col], ids)])
def _clean_sensors_auto_detect(raw_file, **kwargs):
args = ['header', 'delimiter', 'cols_meta', 'quote', 'encoding']
header, delimiter, cols_meta, quote, encoding = (kwargs.get(k)
for k in args)
clean_args = [raw_file, header, delimiter, cols_meta]
thermos_ids = _sensors_ids_in_states(**kwargs)
clean_kwargs = {'thermos_ids': thermos_ids, 'quote': quote,
'encoding': encoding}
clean_records = _validate_sensors_add_to_dict_auto(*clean_args,
**clean_kwargs)
return clean_records
def _validate_sensors_add_to_dict_auto(raw_file, header, delimiter, cols_meta,
thermos_ids=None, quote=None,
encoding=None):
clean_records = {}
id_col, time_col = (cols_meta[k]['position'] for k in ['id', 'time'])
id_is_int = _id_is_int(cols_meta)
dt_args = [raw_file, time_col, encoding, delimiter, quote, header]
datetime_format = _guess_datetime_format_from_first_record(*dt_args)
data_cols = _non_index_col_types(cols_meta, dt_format=datetime_format)
with open(raw_file, encoding=encoding) as lines:
_ = lines.readline()
for line in lines:
record = _record_from_line(line, delimiter, quote, header)
if record and _validate_sensors_auto_record(record, id_col,
ids=thermos_ids):
# Sensor named tuple declaration is global, in order to ensure
# that named tuples using it can be pickled.
# Sensor = namedtuple('Sensor', ['sensor_id', 'timestamp'])
id_val = _id_val(record, id_col, id_is_int)
time = _to_datetime(record[time_col],
dt_format=datetime_format)
multiidcols = Sensor(sensor_id=id_val,
timestamp=time)
temp_and_other_vals = _record_vals(record, data_cols)
clean_records[multiidcols] = temp_and_other_vals
return clean_records
def _non_index_col_types(cols_meta, dt_format=None):
"""Return a list of 2-tuples for non-index data columns.
The first element of each tuple is the column index, and the second is
a primitive type (int or float), function or None. The primitive type or
function will be used to change the types of each data element (or if
None, leave them as strings). The list is sorted ascending in the order
of the positions of columns."""
    if dt_format:
if '2.7' in sys.version:
_to_datetime.func_defaults = (dt_format,)
else:
_to_datetime.__defaults__ = (dt_format,)
data_cols = set([meta['position'] for k, meta in cols_meta.items() if
k not in ['id', 'time', 'cycle', 'start_time']])
type_map = dict([('ints', int), ('floats', float),
('time', _to_datetime),
('numeric_commas', _remove_commas_from_int)])
data_cols_types = dict([(meta['position'], type_map[meta['type']])
for meta in cols_meta.values()
if meta['position'] in data_cols and
meta['type'] in type_map])
cols_types = []
for col in data_cols:
if col in data_cols_types:
cols_types.append((col, data_cols_types[col]))
elif col in data_cols:
cols_types.append((col, None))
return cols_types
def _to_datetime(date_str, dt_format=None):
return pd.to_datetime(date_str, format=dt_format).to_pydatetime()
def _remove_commas_from_int(numeric_string):
return int(numeric_string.replace(',', ''))
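# Illustrative conversions performed by the helpers above:
#
#     _remove_commas_from_int('1,234,567')  -> 1234567
#     _to_datetime('2014-01-01 00:15:00')   -> dt.datetime(2014, 1, 1, 0, 15)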
def _validate_sensors_auto_record(record, id_col, ids=None):
"""Validate that standardized record has expected data content.
"""
return _validate_id(record[id_col], ids)
def _clean_geospatial_auto_detect(raw_file, **kwargs):
args = ['header', 'delimiter', 'cols_meta', 'quote', 'encoding']
header, delimiter, cols_meta, quote, encoding = (kwargs.get(k)
for k in args)
location_ids = _locations_in_states(**kwargs)
clean_args = [raw_file, header, delimiter, cols_meta]
clean_kwargs = {'location_ids': location_ids, 'quote': quote,
'encoding': encoding}
clean_records = _validate_geospatial_add_to_dict_auto(*clean_args,
**clean_kwargs)
return clean_records
def _validate_geospatial_add_to_dict_auto(raw_file, header, delimiter, cols_meta,
location_ids=None, quote=None,
encoding=None):
clean_records = {}
id_col, time_col = (cols_meta[k]['position'] for k in ['id', 'time'])
id_is_int = _id_is_int(cols_meta)
dt_args = [raw_file, time_col, encoding, delimiter, quote, header]
datetime_format = _guess_datetime_format_from_first_record(*dt_args)
data_cols = _non_index_col_types(cols_meta, dt_format=datetime_format)
with open(raw_file, encoding=encoding) as lines:
_ = lines.readline()
for line in lines:
record = _record_from_line(line, delimiter, quote, header)
if record and _validate_geospatial_auto_record(record, id_col,
ids=location_ids):
# Geospatial named tuple declared globally to enable pickling.
# The following is here for reference.
# Geospatial = namedtuple('Geospatial', ['location_id', 'timestamp'])
id_val = _id_val(record, id_col, id_is_int)
time = _to_datetime(record[time_col],
dt_format=datetime_format)
multiidcols = Geospatial(location_id=id_val,
timestamp=time)
temp_and_other_vals = _record_vals(record, data_cols)
clean_records[multiidcols] = temp_and_other_vals
return clean_records
def _validate_geospatial_auto_record(record, id_col, ids=None):
"""Validate that standardized record has expected data content.
"""
return _validate_id(record[id_col], ids)
def _record_vals(record, col_conversions):
record_vals = []
for col, convert_func in col_conversions:
if convert_func:
record_vals.append(convert_func(record[col]))
else:
record_vals.append(record[col])
if len(record_vals) > 1:
return tuple(record_vals)
else:
return record_vals[0]
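# Illustrative behavior of _record_vals: one converted column comes back as a
# scalar, several come back as a tuple.
#
#     _record_vals(('T12', '2014-01-01', '71.5'), [(2, float)])  -> 71.5
#     _record_vals(('T12', '2014-01-01', '71.5', '45'),
#                  [(2, float), (3, int)])                       -> (71.5, 45)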
def _analyze_all_columns(raw_file, header, encoding='UTF-8', delimiter=None,
quote=None, id_col=None, cycle=None, auto=None,
cols_to_ignore=None, cycle_col_heading=None):
"""Creates NumPy array with first 1,000 lines containing numeric data."""
with open(raw_file, encoding=encoding) as lines:
_ = lines.readline()
delimiter, quote = _determine_delimiter_and_quote(lines, delimiter,
quote, auto=auto)
cycle_col = _detect_cycle_col(raw_file, header, cycle, delimiter,
auto=auto, encoding=encoding, quote=quote,
cycle_col_heading=cycle_col_heading)
sample_kwargs = {'quote': quote, 'encoding': encoding}
timestamp_cols = _detect_time_stamps(raw_file, header, delimiter,
**sample_kwargs)
data_cols = _detect_column_data_types(raw_file, header, timestamp_cols,
delimiter, cols_to_ignore,
**sample_kwargs)
id_other_cols = _detect_id_other_cols(raw_file, header, timestamp_cols,
data_cols, id_col=id_col,
cycle_col=cycle_col,
delimiter=delimiter,
encoding=encoding, quote=quote)
cols_meta = _create_col_meta(header, id_other_cols, timestamp_cols,
cols_to_ignore, cycle_col=cycle_col)
return cols_meta, delimiter, quote
def _select_sample_records(raw_file, header, encoding=None, delimiter=None,
quote=None):
with open(raw_file, encoding=encoding) as lines:
_ = lines.readline()
delimiter, quote = _determine_delimiter_and_quote(lines, delimiter,
quote)
sample_records = []
with open(raw_file, encoding=encoding) as lines:
_ = lines.readline()
for line in lines:
record = _record_from_line(line, delimiter, quote, header)
if record:
sample_records.append(record)
else:
continue
if len(sample_records) == 1000:
break
sample_record_array = np.array(sample_records)
return sample_record_array, delimiter, quote
def _determine_delimiter_and_quote(lines, delimiter, quote, auto=None):
for i, line in enumerate(lines):
if not _contains_digits(line):
continue
if delimiter is None:
delimiter = _determine_delimiter(line, auto=auto)
if quote is None:
quote = _determine_quote(line)
if delimiter and quote:
break
if i == 100:
break
return delimiter, quote
def _record_has_all_expected_columns(record, header):
if len(record) == len(header) and all(record):
return True
else:
return False
def _detect_time_stamps(raw_file, header, delimiter, cycle_col=None,
quote=None, encoding=None):
"""Return column index of first and (for cycle data) second time stamp."""
first_time_stamp_col = None
second_time_stamp_col = None
with open(raw_file, encoding=encoding) as f:
_ = f.readline()
for line in f:
if _contains_digits(line):
record = _parse_line(line, delimiter,
quote)
if _record_has_all_expected_columns(record, header):
break
for col, val in enumerate(record):
if (any([':' in val, '/' in val, '-' in val]) and
_validate_time_stamp(val)):
if first_time_stamp_col is None and col != cycle_col:
first_time_stamp_col = col
elif col != cycle_col:
second_time_stamp_col = col
break
return [first_time_stamp_col, second_time_stamp_col]
def _detect_column_data_types(raw_file, header, timestamp_cols, delimiter,
cols_to_ignore, quote=None, encoding='UTF-8'):
"""Returns dict containing lists of column indexes that are not assigned
as the ID column, cycle column, or time stamp.
"""
with open(raw_file, encoding=encoding) as lines:
_ = lines.readline()
columns_to_detect = _non_time_cols(header, timestamp_cols, cols_to_ignore)
grouped = _determine_types_of_non_time_cols(columns_to_detect, lines, delimiter,
quote, header)
return grouped
def _record_from_line(line, delimiter, quote, header):
if _contains_digits(line):
record = _parse_line(line, delimiter, quote)
if _record_has_all_expected_columns(record, header):
return record
else:
return None
else:
return None
def _non_time_cols(header, timestamp_cols, cols_to_ignore):
time_columns = set(_timestamp_columns(timestamp_cols))
if cols_to_ignore is None:
ignoring = set()
elif isinstance(cols_to_ignore[0], int):
ignoring = set(cols_to_ignore)
elif isinstance(cols_to_ignore[0], str):
col_indexes_ignoring = []
for heading in cols_to_ignore:
col_indexes_ignoring.append(_column_index_of_string(header, heading))
ignoring = set(col_indexes_ignoring)
return set(range(len(header))) - time_columns - ignoring
def _timestamp_columns(timestamp_cols):
reserved_columns = [col for col in timestamp_cols if col is not None]
return reserved_columns
def _determine_types_of_non_time_cols(columns, lines, delimiter, quote, header):
"""Returns dict with lists as values. The lists contain column indexes
that have not been assigned to the ID column, cycle column, or time stamps.
"""
int_records_found = {}
possible_zips = {}
float_cols = []
numeric_containing_commas = []
alphanumeric_cols = []
alpha_only_cols = []
zip_plus_4_cols = []
columns_assigned = []
for i, line in enumerate(lines):
record = _record_from_line(line, delimiter, quote, header)
if record:
for col in columns:
if col in columns_assigned:
continue
val = record[col]
                    if val[0] == '0' and val[1] != '.':
alphanumeric_cols.append(col)
elif ',' in val:
if _numeric_containing_commas(val):
numeric_containing_commas.append(col)
else:
alphanumeric_cols.append(col)
elif _has_form_of_5_digit_zip(val):
possible_zips[col] = 1
elif _has_form_of_zip_plus_4_code(val):
                        zip_plus_4_cols.append(col)
elif _is_numeric(val):
if _is_float(val):
float_cols.append(col)
else:
int_records_found[col] = 1
elif _contains_digits(val):
alphanumeric_cols.append(col)
else:
alpha_only_cols.append(col)
columns_assigned = (float_cols + numeric_containing_commas +
zip_plus_4_cols + alphanumeric_cols +
alpha_only_cols)
for col in int_records_found.keys():
if col in possible_zips.keys():
possible_zips.pop(col)
for col in float_cols:
if col in int_records_found.keys():
int_records_found.pop(col)
cols_grouped = {group: cols for group, cols
in [('floats', float_cols),
('ints', list(int_records_found.keys())),
('numeric_commas', numeric_containing_commas),
('alphanumeric', alphanumeric_cols),
('alpha_only', alpha_only_cols),
('possible_zips', list(possible_zips.keys()))]
if cols}
return cols_grouped
def _is_float(val):
try:
assert isinstance(int(val), int)
except ValueError:
try:
assert isinstance(float(val), float)
except ValueError:
return False
else:
return True
else:
return False
def _has_form_of_5_digit_zip(val):
if len(val) == 5 and val.isdigit():
return True
def _has_form_of_zip_plus_4_code(val):
if len(val) == 10 and val[5] == '-' and val.replace('-', '').isdigit():
return True
else:
return False
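# Illustrative checks for the ZIP-code helpers above:
#
#     _has_form_of_5_digit_zip('02139')          -> True
#     _has_form_of_5_digit_zip('2139')           -> None (falsy)
#     _has_form_of_zip_plus_4_code('02139-4307') -> True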
def _detect_id_other_cols(raw_file, header, timestamp_cols,
data_cols, cycle_col=None, id_col=None,
delimiter=None, quote=None, encoding=None):
if cycle_col:
data_cols['cycle_col'] = cycle_col
if id_col:
data_cols['id_col'] = id_col
return data_cols
elif data_cols.get('alphanumeric') and len(data_cols['alphanumeric']) == 1:
data_cols['id_col'] = data_cols['alphanumeric'][0]
return data_cols
else:
sample_records = []
with open(raw_file, encoding=encoding) as lines:
_ = lines.readline()
for i, line in enumerate(lines):
record = _record_from_line(line, delimiter, quote, header)
if record:
possible_id_col_data = _data_in_possible_id_cols(record,
data_cols)
                    # possible_id_col_data contains a 2-tuple in which each part
                    # contains either a list of ints (data) or None
sample_records.append(possible_id_col_data)
if i == 1000:
break
sample_arr = np.array(sample_records)
possible_id_col_indexes = [data_cols[cols] for cols in ['alphanumeric', 'ints']
if data_cols.get(cols)]
# Detect ID column based on headings in columns
if (_contains_id_heading(header, 0) and
_contains_id_heading(header, 1)):
id_col = _primary_id_col_from_two_fields(sample_arr)
data_cols['id_col'] = id_col
elif _contains_id_heading(header, 0):
data_cols['id_col'] = 0
        if id_col in [col for col in timestamp_cols + [cycle_col] if col is not None]:
data_cols['id_col'] = None
cols_with_max_val_over_150 = []
col_of_max_val, max_val = None, 0
col_of_max_std, max_std = None, 0
for col in possible_id_col_indexes:
if np.amax(sample_arr[:, col]) > 150:
cols_with_max_val_over_150.append(col)
args = [col, sample_arr, np.amax, col_of_max_val, max_val]
col_of_max_val, max_val = _compare_with_max(*args)
args = [col, sample_arr, np.std, col_of_max_std, max_std]
col_of_max_std, max_std = _compare_with_max(*args)
if len(cols_with_max_val_over_150) == 1:
id_col = cols_with_max_val_over_150[0]
elif len(cols_with_max_val_over_150) > 1:
id_col = col_of_max_val
else:
id_col = col_of_max_std
data_cols['id_col'] = id_col
return data_cols
def _remove_alphas(val):
for char in val:
if not char.isdigit():
val = val.replace(char, '')
if len(val):
return val
else:
return None
def _is_numeric(val):
try:
assert isinstance(float(val), float)
except ValueError:
return False
else:
return True
def _numeric_containing_commas(val):
ones_tens_and_greater = val.split('.')[0]
split_by_commas = ones_tens_and_greater.split(',')
first_group = split_by_commas[0]
if len(first_group) > 3 and first_group.isdigit() and first_group[0] != '0':
return False
for group in split_by_commas[1:]:
if len(group) == 3 and group.isdigit():
continue
else:
return False
decimal_part = val.split('.')[-1]
if decimal_part.isdigit():
return True
else:
return False
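# Illustrative checks: the value must use commas as thousands separators and
# end in a digits-only decimal part.
#
#     _numeric_containing_commas('1,234.56') -> True
#     _numeric_containing_commas('12,34.5')  -> False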
def _data_in_possible_id_cols(record, data_cols):
if data_cols.get('alphanumeric'):
        alphanumeric_ints = [int(_remove_alphas(record[col_index]))
                             for col_index in data_cols['alphanumeric']]
else:
alphanumeric_ints = None
if data_cols.get('ints'):
ints = [int(record[col_index]) for col_index in data_cols['ints']]
else:
ints = None
return alphanumeric_ints, ints
def _compare_with_max(col, sample_records, func, current_max_col, current_max):
column_vals = sample_records[:, col]
column_func_result = func(column_vals)
if column_func_result > current_max:
current_max = column_func_result
current_max_col = col
return current_max_col, current_max
def _detect_mixed_alpha_numeric_id_col(alphanumeric_cols, header, sample_records):
id_col = None
for col in alphanumeric_cols:
if _contains_id_heading(header, col):
id_col = col
break
if id_col is None:
id_col = _col_with_alpha_or_alphas_in_string(sample_records, header,
alphanumeric_cols)
return id_col
def _detect_cycle_col(raw_file, header, cycle_mode, delimiter,
auto=None, cycle_col_heading=None, quote=None,
encoding=None):
if auto and auto != 'cycles':
return None
if cycle_col_heading:
with open(raw_file, encoding=encoding) as f:
first_line = f.readline()
delimiter = _determine_delimiter(first_line)
quote = _determine_quote(first_line)
header = _parse_line(first_line, delimiter, quote)
cycle_col = _column_index_of_string(header, cycle_col_heading)
else:
cycle_col = None
if cycle_mode:
with open(raw_file, encoding=encoding) as lines:
_ = lines.readline()
cycle_col = _cycle_col_in_records(lines, header, delimiter,
cycle_mode, quote=quote)
if cycle_col is None:
raise ValueError('No column found containing value ' + cycle_mode)
return cycle_col
def _cycle_col_in_records(lines, header, delimiter, cycle, quote=None):
cycle_col = None
for line in lines:
record = _record_from_line(line, delimiter, quote, header)
        if record and cycle in record:
cycle_col = record.index(cycle)
break
if cycle_col is None:
msg = ('No column found containing value \'' + cycle + '\'\n')
raise ValueError(msg)
return cycle_col
def _contains_id_heading(header, col_index):
if 'ID' in header[col_index].upper():
return True
else:
return False
def _primary_id_col_from_two_fields(sample_records):
ids_in_col_0 = _ids_in_col(sample_records, 0)
ids_in_col_1 = _ids_in_col(sample_records, 1)
if len(ids_in_col_0) >= len(ids_in_col_1):
id_col = 0
else:
id_col = 1
return id_col
def _ids_in_col(sample_records, col):
ids_with_repeats = sample_records[:, col]
id_arr = np.unique(ids_with_repeats)
return id_arr
def _col_with_alpha_or_alphas_in_string(records, header, alphanumeric_cols):
"""Returns index of first column with both non-digits and digits if one
exists. Otherwise, returns None."""
header_len = len(header)
record = records[0, :]
col = _col_containing_digit_or_digits(record, header_len,
alphanumeric_cols)
return col
def _col_containing_digit_or_digits(record, header_len, alphanumeric_cols):
"""Returns index of column in record with both non-digits and digits if
one exists. Otherwise, returns None."""
    if len(record) == header_len:
for col in alphanumeric_cols:
for char in record[col]:
if char.isdigit():
return col
return None
def _validate_cycle_mode(cycle, cycle_mode):
    # If no cycle mode was specified in the kwargs, accept every record.
    if cycle_mode is None:
        return True
    elif cycle == cycle_mode:
        return True
    # Cycle mode does not match the mode specified in the kwargs
    else:
        return False
def _validate_id(id, ids):
if any([ids is not None and id in ids, ids is None]):
return True
return False
def _validate_time_stamp(time_string):
try:
kwarg = {'infer_datetime_format': True}
assert isinstance(pd.to_datetime(time_string, **kwarg), dt.datetime)
except ValueError:
return False
else:
return True
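# Illustrative checks (pandas does the actual parsing):
#
#     _validate_time_stamp('2014-01-01 00:15:00') -> True
#     _validate_time_stamp('Cool')                -> False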
def _header_and_id_col_if_heading_or_preconfig(raw_file, encoding='UTF-8',
cycle=None, delimiter=None,
id_col_heading=None, auto=None,
quote=None, is_postal_file=None,
is_sensors_file=None):
id_col_index = None
with open(raw_file, encoding=encoding) as f:
header = f.readline()
if id_col_heading:
kwargs = {'delimiter': delimiter, 'quote': quote}
id_col_and_more = _id_col_delim_quote_from_id_heading(header,
id_col_heading,
**kwargs)
id_col_index, delimiter, quote = id_col_and_more
else:
quote = _determine_quote(header, quote=quote)
delimiter = _determine_delimiter(header, auto=auto, cycle=cycle,
id_col_heading=id_col_heading,
quote=quote)
header = _parse_line(header, delimiter, quote)
if is_postal_file or is_sensors_file:
return header, id_col_index
if (id_col_heading is None) and (auto is None):
id_col_index = _id_col_index_for_preconfig_non_auto_file_format(header)
return header, id_col_index
def _remove_newline_and_any_trailing_delimiter(line, delimiter=None):
return line.rstrip(delimiter + '\n')
def _id_col_delim_quote_from_id_heading(line, id_col_heading, quote=None,
delimiter=None):
quote = _determine_quote(line, quote=quote)
if delimiter is None:
delimiter = _determine_delimiter(line, id_col_heading=id_col_heading, quote=quote)
header = _parse_line(line, delimiter, quote)
if len(header) < 3:
        raise ValueError('Only ' + str(len(header)) + ' columns detected based on '
                         + delimiter + ' as delimiter.')
else:
col_index = _column_index_of_string(header, id_col_heading)
return col_index, delimiter, quote
def _column_index_of_string(line, string):
if string in line:
return line.index(string)
else:
        raise NameError('Please check the string argument, ' + string + ', '
                        'it was not found.')
def _parse_line(line, delimiter, quote):
line = _remove_newline_and_any_trailing_delimiter(line, delimiter=delimiter)
if _character_found_in_line(line, delimiter):
parsed_line = csv.reader([line], delimiter=delimiter, quotechar=quote,
skipinitialspace=True)
return tuple(list(parsed_line)[0])
else:
msg = 'Delimiter specified, ' + delimiter + ', not found in header.'
raise ValueError(msg)
def _id_col_index_for_preconfig_non_auto_file_format(header):
config_cols_func_map = {SENSOR_FIELDS: SENSOR_ID_INDEX,
CYCLE_FIELDS: CYCLE_ID_INDEX,
GEOSPATIAL_FIELDS: GEOSPATIAL_ID_INDEX}
    try:
        id_col_index = config_cols_func_map[header]
    except KeyError:
        print('Header not matched in id_col_index with headers in config.ini file.')
        raise
return id_col_index
# def _remove_commas_from_numeric_strings(items, delimiter, quote=None):
# for i, val in enumerate(items):
# if ',' in val:
# if _numeric_containing_commas(val):
# items[i] = val.replace(',', '')
# elif delimiter == ',' and quote:
# items[i] = quote + val + quote
# items = tuple(items)
# return items
def _create_col_meta(header, id_other_cols, time_stamps, cols_to_ignore, cycle_col=None):
meta = _col_indexes_and_types_for_meta(id_other_cols, time_stamps,
header, cols_to_ignore,
cycle_col=cycle_col)
cols_meta = {}
for m in meta:
cols_meta[m[0]] = {'heading': m[3],
'type': m[2], 'position': m[1]}
return cols_meta
def _col_indexes_and_types_for_meta(id_other_cols, time_stamps, header,
cols_to_ignore, cycle_col=None):
"""Takes list of tuples containing lists of column indexes and the
corresponding type labels."""
# For the non-required columns, using heading labels as keys
# Records contain: 1) key 2) position 3) type(general), 4) label
meta = []
id_col = id_other_cols['id_col']
id_type = _data_type_in_col(id_col, id_other_cols)
meta.append(_create_meta_for_col('id', id_col, id_type, header))
if cycle_col:
cycle_type = _data_type_in_col(cycle_col, id_other_cols)
meta.append(_create_meta_for_col('cycle', cycle_col, cycle_type, header))
if time_stamps[1] is not None:
meta.append(_create_meta_for_col('start_time', time_stamps[0], 'time', header))
meta.append(_create_meta_for_col('end_time', time_stamps[1], 'time', header))
else:
meta.append(_create_meta_for_col('time', time_stamps[0], 'time', header))
for col in _non_time_cols(header, time_stamps, cols_to_ignore):
if col in [c for c in [id_col, cycle_col] if c is not None]:
continue
data_cat = _get_data_category_of_column(col, id_other_cols)
meta.append(_create_meta_for_col(header[col], col, data_cat, header))
return meta
def _data_type_in_col(col_index, id_other_cols):
for data_type, meta in id_other_cols.items():
if isinstance(meta, list) and col_index in meta:
return data_type
def _create_meta_for_col(key, position_in_header, data_category, header):
return key, position_in_header, data_category, header[position_in_header]
def _get_data_category_of_column(col, id_other_cols):
for k, v in id_other_cols.items():
if isinstance(v, list) and col in v:
return k
def _determine_delimiter(line, id_col_heading=None, cycle=None, auto=None, quote=None, header=None):
quote = _determine_quote(line, quote=quote)
comma_possible_delimiter = False
non_comma_delimiters = []
delim_kwargs = {'header': header, 'auto': auto, 'cycle': cycle,
'id_col_heading': id_col_heading}
if _delimiter_gives_minimum_number_of_columns(line, ',', quote,
**delim_kwargs):
comma_possible_delimiter = True
for d in ['\t', '|']:
if _delimiter_gives_minimum_number_of_columns(line, d, quote,
**delim_kwargs):
non_comma_delimiters.append(d)
if not any([comma_possible_delimiter, non_comma_delimiters]):
if _delimiter_gives_minimum_number_of_columns(line, ' ', quote,
**delim_kwargs):
non_comma_delimiters.append(' ')
return _only_possible_delimiter_or_raise_error(comma_possible_delimiter,
non_comma_delimiters)
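# Illustrative detection on a quoted header line (hypothetical headings):
#
#     _determine_delimiter('"DeviceId","CycleType","StartTime","EndTime"\n') -> ','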
def _only_possible_delimiter_or_raise_error(comma_possible, non_comma_possible):
delims = [(',', 'Commas'), ('\t', 'Tabs'), ('|', 'Pipes'), (' ', 'Spaces')]
if comma_possible:
if non_comma_possible:
delimiter_chars = [','] + non_comma_possible
delimiters = {d: desc for d, desc in delims if d in delimiter_chars}
_multiple_possible_delimiters(delimiters)
else:
return ','
else:
if len(non_comma_possible) > 1:
delimiters = {d: desc for d, desc in delims if d in non_comma_possible}
_multiple_possible_delimiters(delimiters)
elif len(non_comma_possible) == 1:
return non_comma_possible[0]
else:
            raise ValueError('Header does not appear to have commas (\',\'), '
                             'tabs (\'\\t\'), pipes (\'|\') or spaces (\' \') '
                             'as delimiters. Please specify.')
def _multiple_possible_delimiters(delimiters):
print('The following is a (or are) possible delimiter(s): ')
for d in delimiters:
print(delimiters[d], ': ', d)
raise ValueError('Specify \'delimiter=\' in keyword arguments')
def _delimiter_gives_minimum_number_of_columns(line, delimiter, quote,
header=None, auto=None,
cycle=None, id_col_heading=None):
if delimiter is not None and delimiter == quote:
        raise ValueError('Delimiter ' + repr(delimiter) + ' and quote character '
                         + repr(quote) + ' are the same.')
# If the first line is being parsed, the header argument is None by default.
if not _character_found_in_line(line, delimiter):
return False
else:
line = line.rstrip(delimiter + '\n')
testdelim = csv.reader([line], delimiter=delimiter, quotechar=quote,
skipinitialspace=True)
parsed_line = list(testdelim)[0]
if header is None:
if id_col_heading:
assert _column_index_of_string(parsed_line, id_col_heading)
return _minimum_number_of_columns_exist(parsed_line)
else:
if auto or cycle:
ets = _expected_time_stamps(auto, cycle)
else:
ets = None
et = {'expected_time_stamps': ets}
return _minimum_number_of_columns_exist(parsed_line, **et)
def _character_found_in_line(line, char):
    # Escape the character so regex metacharacters such as '|' are matched literally.
    delimre = re.compile(re.escape(char))
    return bool(delimre.search(line))
def _expected_time_stamps(auto, cycle=None):
if auto in ('sensors', 'geospatial'):
return 1
elif auto == 'cycles' or cycle:
return 2
else:
        raise ValueError('Value of auto argument (' + str(auto) + ') not found.\n'
                         'Should be \'sensors\', \'geospatial\', or \'cycles\'')
def _minimum_number_of_columns_exist(csv_reader_out, expected_time_stamps=None):
if len(list(csv_reader_out)) >= 3:
if expected_time_stamps: # Data row is being parsed
if _number_of_time_stamps_matches(csv_reader_out,
expected_time_stamps):
return True
else:
return False
# The header is being parsed and there are no time stamp values
else:
return True
else:
return False
def _number_of_time_stamps_matches(parsed_line, num_expected_time_stamps):
time_stamps = 0
for val in parsed_line:
if _validate_time_stamp(val):
time_stamps += 1
if time_stamps == num_expected_time_stamps:
return True
else:
return False
def _determine_quote(line, quote=None):
quotech = None
if quote:
        quotere = re.compile(re.escape(quote))
if bool(quotere.search(line)):
quotech = quote
return quotech
else:
msg = quote + ' not found as quote. Check if quote character needs to be ' \
'escaped with a \\. \n \" and \' are detected automatically.'
raise ValueError(msg)
for q in ['\"', '\'']:
quotere = re.compile(q)
if bool(quotere.search(line)):
quotech = q
break
return quotech
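# Illustrative detection: double or single quotes are found automatically.
#
#     _determine_quote('"DeviceId","CycleType"\n') -> '"'
#     _determine_quote('DeviceId,CycleType\n')     -> None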
def _sensors_ids_in_states(**kwargs):
if kwargs.get('states'):
sensors_ids = (_sensors_states_df(**kwargs)
.index
.unique()
.ravel()
.astype(np.unicode))
else:
sensors_ids = None
return sensors_ids
def _contains_digits(line):
    digits = re.compile(r'\d')
return bool(digits.search(line))
def _missing_sensors_or_postal_error_message():
print('State(s) specified but sensors and/or postal codes not '
'specified.')
# cleanthermo / fixedautohelpers
def _sensors_states_df(**kwargs):
"""Returns pandas dataframe with sensor metadata and location
information for sensors in specified states.
"""
postal_file, sensors_file = (kwargs.get(k) for k
in ['postal_file', 'sensors_file'])
states = (kwargs.get('states')).split(',')
auto = kwargs.get('auto') if kwargs.get('auto') else None
zip_codes_df = _zip_codes_in_states(postal_file, states, auto)
thermos_df = _sensors_df(sensors_file, auto)
header_kwargs = {'is_sensors_file': True}
header, _ = _header_and_id_col_if_heading_or_preconfig(sensors_file,
**header_kwargs)
zip_heading = _label_of_col_containing_string_lower_upper_title(header, 'zip')
sensors_states_df = pd.merge(thermos_df, zip_codes_df, how='inner',
left_on=zip_heading,
right_index=True)
return sensors_states_df
def _zip_codes_in_states(postal_file, states, auto):
"""Returns pandas dataframe based on postal code metadata file, for states
specified as list.
"""
header, _ = _header_and_id_col_if_heading_or_preconfig(postal_file,
is_postal_file=True)
if auto:
zip_col = _index_of_col_with_string_in_lower_upper_or_title(header, 'zip')
if zip_col is None:
zip_col = _index_of_col_with_string_in_lower_upper_or_title(header,
'post')
else:
zip_col = _index_of_col_with_string_in_lower_upper_or_title(header,
POSTAL_FILE_ZIP)
zip_col_label = header[zip_col]
dtype_zip_code = {zip_col_label: 'str'}
if os.path.splitext(postal_file)[1] == '.csv':
zips_default_index_df = pd.read_csv(postal_file, dtype=dtype_zip_code)
else:
zips_default_index_df = pd.read_table(postal_file,
dtype=dtype_zip_code)
zips_default_index_df[zip_col_label] = zips_default_index_df[zip_col_label]\
.str.pad(5, side='left', fillchar='0')
zips_unfiltered_df = zips_default_index_df.set_index([zip_col_label])
state_filter = zips_unfiltered_df[POSTAL_TWO_LETTER_STATE].isin(states)
zip_codes_df = zips_unfiltered_df.loc[state_filter]
return zip_codes_df
def _sensors_df(sensors_file, auto, encoding='UTF-8', delimiter=None):
"""Returns pandas dataframe of sensor metadata from raw file."""
kwargs = {'encoding': encoding, 'is_sensors_file': True}
header, _ = _header_and_id_col_if_heading_or_preconfig(sensors_file,
**kwargs)
sample_records, _, _ = _select_sample_records(sensors_file, header,
encoding=encoding)
if auto:
zip_col = _index_of_col_with_string_in_lower_upper_or_title(header,
'zip')
zip_col_label = header[zip_col]
if _contains_id_heading(header, 0) and _contains_id_heading(header, 1):
id_col = _primary_id_col_from_two_fields(sample_records)
else:
id_col = _index_of_col_with_string_in_lower_upper_or_title(header, 'id')
if id_col is None:
raise ValueError('No column found in sensors file with label '
'containing \'id\', \'Id\', or \'ID\'.')
id_col_heading = header[id_col]
else:
zip_col_label = SENSOR_ZIP_CODE
id_col_heading = SENSOR_DEVICE_ID
dtype_sensor = {zip_col_label: 'str', id_col_heading: 'str'}
if os.path.splitext(sensors_file)[1] == '.csv':
thermos_df = pd.read_csv(sensors_file,
dtype=dtype_sensor)
else:
thermos_df = pd.read_table(sensors_file,
dtype=dtype_sensor)
thermos_df.set_index(keys=id_col_heading, inplace=True)
thermos_df[zip_col_label] = thermos_df[zip_col_label].str.pad(5, side='left',
fillchar='0')
return thermos_df
def _label_of_col_containing_string_lower_upper_title(header, string):
index = _index_of_col_with_string_in_lower_upper_or_title(header, string)
    # index 0 is a valid column position, so compare against None explicitly
    if index is not None:
return header[index]
else:
return None
def _index_of_col_with_string_in_lower_upper_or_title(header, string):
for col, val in enumerate(header):
label = header[col]
if any([(s in label) for s in [string, string.title(),
string.upper()]]):
return col
return None
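# Illustrative sketch (hypothetical header, not from the original module):
# matching is a plain substring test against the lower, Title and UPPER
# variants of the search string, returning the first matching column index.
def _example_header_matching():
    header = ['Device Id', 'Zip Code', 'State']
    assert _index_of_col_with_string_in_lower_upper_or_title(header, 'zip') == 1
    assert _index_of_col_with_string_in_lower_upper_or_title(header, 'city') is None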
def _locations_in_states(**kwargs):
"""Returns location IDs for locations in specified states."""
if kwargs.get('states'):
thermos_states_df = _sensors_states_df(**kwargs)
location_ids_in_states = (thermos_states_df[SENSOR_LOCATION_ID]
.unique()
.ravel()
.astype(np.unicode))
else:
location_ids_in_states = None
return location_ids_in_states
# Fixed (static) file format handling
def _data_type_matching_header(header):
"""Returns a string indicating the type of data corresponding to a header
in a text file, where the header is a comma-separated string in which
each element is itself a string.
"""
if SENSOR_ID_FIELD in header:
data_type = 'sensors'
field_data_mapping = {UNIQUE_CYCLE_FIELD_INDEX: 'cycles',
UNIQUE_GEOSPATIAL_FIELD: 'geospatial'}
fields_as_keys = set(field_data_mapping.keys())
field_in_header = set.intersection(fields_as_keys, set(header))
if len(field_in_header):
field = field_in_header.pop()
data_type = field_data_mapping[field]
return data_type
def _clean_cycles(raw_file, **kwargs):
"""Returns dict for cycling start and end times for sensors, which may
be filtered using 'states' parameter, a string that is a comma-separated
series of state abbreviations.
"""
clean_records = {}
args = ['states', 'cycle', 'delimiter', 'quote', 'header', 'cols_meta']
states, cycle, delimiter, quote, header, cols_meta = (kwargs.get(k) for k
in args)
id_col = _id_col_position(cols_meta)
id_is_int = _id_is_int(cols_meta)
data_cols = _non_index_col_types(cols_meta)
if states:
thermos_ids = _sensors_ids_in_states(**kwargs)
with open(raw_file, encoding=kwargs.get('encoding')) as lines:
_ = lines.readline()
for line in lines:
record = _record_from_line(line, delimiter, quote, header)
if record and all(_validate_cycles_record(record, ids=thermos_ids,
cycle=cycle)):
# Cycle named tuple declaration is global, in order to ensure
# that named tuples using it can be pickled.
# Cycle = namedtuple('Cycle', ['device_id', 'cycle_mode',
# 'start_time'])
id_val = _id_val(record, id_col, id_is_int)
multicols = Cycle(device_id=id_val,
cycle_mode=_cycle_type(record),
start_time=_start_cycle(record))
clean_records[multicols] = _record_vals(record, data_cols)
else:
clean_records = _clean_cycles_all_states(raw_file, **kwargs)
return clean_records
def _clean_sensors(raw_file, **kwargs):
"""Returns dict for sensor data, which may be filtered using
'states' parameter, a string that is a comma-separated series of state
abbreviations.
"""
clean_records = {}
args = ['states', 'header', 'delimiter', 'quote', 'cols_meta', 'encoding']
states, header, delimiter, quote, cols_meta, encoding = (kwargs.get(k)
for k in args)
id_is_int = _id_is_int(cols_meta)
id_col = _id_col_position(cols_meta)
with open(raw_file, encoding=encoding) as lines:
_ = lines.readline()
if states:
thermos_ids = _sensors_states_df(**kwargs).index.ravel()
for line in lines:
record = _record_from_line(line, delimiter, quote, header)
if record and all(_validate_sensors_record(record,
ids=thermos_ids)):
# Sensor named tuple declared globally, to enable pickling to
# work.
# Sensor = namedtuple('Sensor', ['sensor_id', 'timestamp'])
id_val = _id_val(record, id_col, id_is_int)
multicols = Sensor(sensor_id=id_val,
timestamp=_sensor_timestamp(record))
clean_records[multicols] = _sensor_observation(record)
else:
clean_records = _clean_sensors_all_states(raw_file, **kwargs)
return clean_records
def _clean_sensors_all_states(raw_file, **kwargs):
"""Returns dict for observations recorded by sensors, regardless
of state."""
clean_records = {}
args = ['header', 'delimiter', 'quote', 'encoding', 'cols_meta']
header, delimiter, quote, encoding, cols_meta = (kwargs.get(k)
for k in args)
id_col = _id_col_position(cols_meta)
id_is_int = _id_is_int(cols_meta)
with open(raw_file, encoding=encoding) as lines:
_ = lines.readline()
for line in lines:
record = _record_from_line(line, delimiter, quote, header)
if record:
# Sensor named tuple declaration is global, in order to ensure that
# named tuples using it can be pickled.
# # Sensor = namedtuple('Sensor', ['sensor_id', 'timestamp'])
id_val = _id_val(record, id_col, id_is_int)
multicols = Sensor(sensor_id=id_val,
timestamp=_sensor_timestamp(record))
clean_records[multicols] = _sensor_observation(record)
return clean_records
def _validate_sensors_record(record, ids=None):
"""Validate that line of text file containing indoor temperatures data
has expected data content.
"""
if ids is not None:
yield _leading_id(record) in ids
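# Illustrative sketch (hypothetical record, not from the original module): the
# validators are generators, so they combine naturally with all(); when no
# filter is supplied nothing is yielded and all() is vacuously True.
def _example_validator_usage():
    record = ('100', '2014-01-01 00:00:00', '71')
    assert all(_validate_sensors_record(record, ids={'100', '101'}))
    assert not all(_validate_sensors_record(record, ids={'200'}))
    assert all(_validate_sensors_record(record))  # no ids given, so accepted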
def _clean_cycles_all_states(raw_file, **kwargs):
"""Returns dict for cycle start and end times of sensors, regardless of
state.
"""
clean_records = {}
args = ['cycle', 'header', 'delimiter', 'quote', 'encoding', 'cols_meta']
cycle, header, delimiter, quote, encoding, cols_meta = (kwargs.get(k) for
k in args)
id_col = _id_col_position(cols_meta)
id_is_int = _id_is_int(cols_meta)
data_cols = _non_index_col_types(cols_meta)
with open(raw_file, encoding=encoding) as lines:
_ = lines.readline()
for line in lines:
record = _record_from_line(line, delimiter, quote, header)
if record and all(_validate_cycles_record(record, cycle=cycle)):
# Cycle named tuple declaration is global, in order to ensure that
# named tuples using it can be pickled.
# Cycle = namedtuple('Cycle', ['device_id', 'cycle_mode',
# 'start_time'])
id_val = _id_val(record, id_col, id_is_int)
multicols = Cycle(device_id=id_val,
cycle_mode=_cycle_type(record),
start_time=_start_cycle(record))
clean_records[multicols] = _record_vals(record, data_cols)
return clean_records
def _validate_cycles_record(record, ids=None, cycle=None):
"""Validate that line of text file containing cycing data
has expected data content.
"""
if ids is not None:
yield _leading_id(record) in ids
if cycle:
yield _cycle_type(record) == cycle
def _clean_geospatial(raw_file, **kwargs):
"""Returns dict for outdoor temperatures by location, which may be filtered
using 'states' parameter, a string that is a comma-separated series of
state abbreviations.
"""
clean_records = {}
args = ['states', 'delimiter', 'quote', 'header', 'cols_meta', 'encoding']
states, delimiter, quote, header, cols_meta, encoding = (kwargs.get(k)
for k in args)
id_is_int = _id_is_int(cols_meta)
id_col = _id_col_position(cols_meta)
if states:
location_ids = _locations_in_states(**kwargs)
with open(raw_file, encoding=encoding) as lines:
_ = lines.readline()
for line in lines:
record = _record_from_line(line, delimiter, quote, header)
if record and all(_validate_geospatial_record(record,
ids=location_ids)):
id_val = _id_val(record, id_col, id_is_int)
multicols = Geospatial(location_id=id_val,
timestamp=_geospatial_timestamp(record))
clean_records[multicols] = _geospatial_obs(record)
else:
clean_records = _clean_geospatial_all_states(raw_file, **kwargs)
return clean_records
def _clean_geospatial_all_states(raw_file, **kwargs):
"""Returns dict for outdoor temperatures by location, regardless of
state.
"""
clean_records = {}
args = ['delimiter', 'quote', 'header', 'encoding', 'cols_meta']
delimiter, quote, header, encoding, cols_meta = (kwargs.get(k)
for k in args)
id_is_int = _id_is_int(cols_meta)
id_col = _id_col_position(cols_meta)
with open(raw_file, encoding=encoding) as lines:
_ = lines.readline()
for line in lines:
record = _record_from_line(line, delimiter, quote, header)
if record:
# Geospatial named tuple declared globally to enable pickling.
# The following is here for reference.
# Geospatial = namedtuple('Geospatial', ['location_id', 'timestamp'])
id_val = _id_val(record, id_col, id_is_int)
multicols = Geospatial(location_id=id_val,
timestamp=_geospatial_timestamp(record))
clean_records[multicols] = _geospatial_obs(record)
return clean_records
def _id_col_position(cols_meta):
return cols_meta['id']['position']
def _id_is_int(cols_meta):
    id_is_int = cols_meta['id']['type'] == 'ints'
return id_is_int
def _id_val(record, id_col, id_is_int):
id_val = int(record[id_col]) if id_is_int else record[id_col]
return id_val
def _validate_geospatial_record(record, ids=None):
"""Validate that line of text file containing outdoor temperatures data
has expected content.
"""
if ids is not None:
yield _leading_id(record) in ids
def _leading_id(record):
return record[0]
def _cycle_type(record):
return record[CYCLE_TYPE_INDEX]
def _start_cycle(record):
return record[CYCLE_START_INDEX]
def _cycle_record_vals(record):
start = CYCLE_END_TIME_INDEX
end = len(CYCLE_FIELDS)
return record[start:end]
def _sensor_timestamp(record):
timestamp_position = SENSORS_LOG_DATE_INDEX
return record[timestamp_position]
def _numeric_leads(record):
"""Returns True if all characters in the leading string element in a
sequence are digits.
"""
    return record[0].isdigit()
def _sensor_observation(record):
degrees_position = SENSORS_DATA_INDEX
return record[degrees_position]
def _inside_rec_len(record):
    return len(record) == len(SENSOR_FIELDS)
def _geospatial_timestamp(record):
timestamp_position = GEOSPATIAL_LOG_DATE_INDEX
return record[timestamp_position]
def _geospatial_obs(record):
degrees_position = GEOSPATIAL_OBSERVATION_INDEX
return record[degrees_position]
| {
"repo_name": "nickpowersys/CaaR",
"path": "caar/cleanthermostat.py",
"copies": "1",
"size": "93813",
"license": "bsd-3-clause",
"hash": 2010169902647102700,
"line_mean": 42.8378504673,
"line_max": 414,
"alpha_frac": 0.606525748,
"autogenerated": false,
"ratio": 4.1196645002634815,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5226190248263481,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from collections import OrderedDict, Iterator
import copy
from functools import partial
from hashlib import md5
import inspect
import pickle
import os
import uuid
from toolz import merge, groupby, curry, identity
from toolz.functoolz import Compose
from . import sharedict
from .compatibility import bind_method, unicode, PY3
from .context import _globals
from .core import flatten
from .utils import Dispatch, ensure_dict
from .sharedict import ShareDict
__all__ = ("Base", "compute", "normalize_token", "tokenize", "visualize")
class Base(object):
"""Base class for dask collections"""
def visualize(self, filename='mydask', format=None, optimize_graph=False,
**kwargs):
"""
Render the computation of this object's task graph using graphviz.
Requires ``graphviz`` to be installed.
Parameters
----------
filename : str or None, optional
The name (without an extension) of the file to write to disk. If
`filename` is None, no file will be written, and we communicate
with dot using only pipes.
format : {'png', 'pdf', 'dot', 'svg', 'jpeg', 'jpg'}, optional
Format in which to write output file. Default is 'png'.
optimize_graph : bool, optional
If True, the graph is optimized before rendering. Otherwise,
the graph is displayed as is. Default is False.
**kwargs
Additional keyword arguments to forward to ``to_graphviz``.
Returns
-------
        result : IPython.display.Image, IPython.display.SVG, or None
See dask.dot.dot_graph for more information.
See also
--------
dask.base.visualize
dask.dot.dot_graph
Notes
-----
For more information on optimization see here:
http://dask.pydata.org/en/latest/optimize.html
"""
return visualize(self, filename=filename, format=format,
optimize_graph=optimize_graph, **kwargs)
def persist(self, **kwargs):
""" Persist this dask collection into memory
See ``dask.base.persist`` for full docstring
"""
(result,) = persist(self, **kwargs)
return result
def compute(self, **kwargs):
""" Compute this dask collection
This turns a lazy Dask collection into its in-memory equivalent.
For example a Dask.array turns into a NumPy array and a Dask.dataframe
turns into a Pandas dataframe. The entire dataset must fit into memory
before calling this operation.
Parameters
----------
get : callable, optional
A scheduler ``get`` function to use. If not provided, the default
is to check the global settings first, and then fall back to
the collection defaults.
optimize_graph : bool, optional
If True [default], the graph is optimized before computation.
Otherwise the graph is run as is. This can be useful for debugging.
kwargs
Extra keywords to forward to the scheduler ``get`` function.
"""
(result,) = compute(self, traverse=False, **kwargs)
return result
@classmethod
def _get(cls, dsk, keys, get=None, **kwargs):
get = get or _globals['get'] or cls._default_get
dsk2 = optimization_function(cls)(ensure_dict(dsk), keys, **kwargs)
return get(dsk2, keys, **kwargs)
@classmethod
def _bind_operator(cls, op):
""" bind operator to this class """
name = op.__name__
if name.endswith('_'):
# for and_ and or_
name = name[:-1]
elif name == 'inv':
name = 'invert'
meth = '__{0}__'.format(name)
if name in ('abs', 'invert', 'neg', 'pos'):
bind_method(cls, meth, cls._get_unary_operator(op))
else:
bind_method(cls, meth, cls._get_binary_operator(op))
if name in ('eq', 'gt', 'ge', 'lt', 'le', 'ne', 'getitem'):
return
rmeth = '__r{0}__'.format(name)
bind_method(cls, rmeth, cls._get_binary_operator(op, inv=True))
@classmethod
def _get_unary_operator(cls, op):
""" Must return a method used by unary operator """
raise NotImplementedError
@classmethod
def _get_binary_operator(cls, op, inv=False):
""" Must return a method used by binary operator """
raise NotImplementedError
def compute(*args, **kwargs):
"""Compute several dask collections at once.
Parameters
----------
args : object
Any number of objects. If it is a dask object, it's computed and the
result is returned. By default, python builtin collections are also
traversed to look for dask objects (for more information see the
``traverse`` keyword). Non-dask arguments are passed through unchanged.
traverse : bool, optional
By default dask traverses builtin python collections looking for dask
objects passed to ``compute``. For large collections this can be
expensive. If none of the arguments contain any dask objects, set
``traverse=False`` to avoid doing this traversal.
get : callable, optional
A scheduler ``get`` function to use. If not provided, the default is
to check the global settings first, and then fall back to defaults for
the collections.
optimize_graph : bool, optional
If True [default], the optimizations for each collection are applied
before computation. Otherwise the graph is run as is. This can be
useful for debugging.
kwargs
Extra keywords to forward to the scheduler ``get`` function.
Examples
--------
>>> import dask.array as da
>>> a = da.arange(10, chunks=2).sum()
>>> b = da.arange(10, chunks=2).mean()
>>> compute(a, b)
(45, 4.5)
By default, dask objects inside python collections will also be computed:
>>> compute({'a': a, 'b': b, 'c': 1}) # doctest: +SKIP
({'a': 45, 'b': 4.5, 'c': 1},)
"""
from dask.delayed import delayed
traverse = kwargs.pop('traverse', True)
if traverse:
args = tuple(delayed(a)
if isinstance(a, (list, set, tuple, dict, Iterator))
else a for a in args)
optimize_graph = kwargs.pop('optimize_graph', True)
variables = [a for a in args if isinstance(a, Base)]
if not variables:
return args
get = kwargs.pop('get', None) or _globals['get']
if not get:
get = variables[0]._default_get
if not all(a._default_get == get for a in variables):
raise ValueError("Compute called on multiple collections with "
"differing default schedulers. Please specify a "
"scheduler `get` function using either "
"the `get` kwarg or globally with `set_options`.")
dsk = collections_to_dsk(variables, optimize_graph, **kwargs)
keys = [var._keys() for var in variables]
results = get(dsk, keys, **kwargs)
results_iter = iter(results)
return tuple(a if not isinstance(a, Base)
else a._finalize(next(results_iter))
for a in args)
def visualize(*args, **kwargs):
"""
Visualize several dask graphs at once.
Requires ``graphviz`` to be installed. All options that are not the dask
graph(s) should be passed as keyword arguments.
Parameters
----------
dsk : dict(s) or collection(s)
The dask graph(s) to visualize.
filename : str or None, optional
The name (without an extension) of the file to write to disk. If
`filename` is None, no file will be written, and we communicate
with dot using only pipes.
format : {'png', 'pdf', 'dot', 'svg', 'jpeg', 'jpg'}, optional
Format in which to write output file. Default is 'png'.
optimize_graph : bool, optional
If True, the graph is optimized before rendering. Otherwise,
the graph is displayed as is. Default is False.
**kwargs
Additional keyword arguments to forward to ``to_graphviz``.
Returns
-------
    result : IPython.display.Image, IPython.display.SVG, or None
See dask.dot.dot_graph for more information.
See also
--------
dask.dot.dot_graph
Notes
-----
For more information on optimization see here:
http://dask.pydata.org/en/latest/optimize.html
"""
dsks = [arg for arg in args if isinstance(arg, dict)]
args = [arg for arg in args if isinstance(arg, Base)]
filename = kwargs.pop('filename', 'mydask')
optimize_graph = kwargs.pop('optimize_graph', False)
from dask.dot import dot_graph
if optimize_graph:
dsks.extend([optimization_function(arg)(ensure_dict(arg.dask), arg._keys())
for arg in args])
else:
dsks.extend([arg.dask for arg in args])
dsk = merge(dsks)
return dot_graph(dsk, filename=filename, **kwargs)
function_cache = {}
def normalize_function(func):
try:
return function_cache[func]
except KeyError:
result = _normalize_function(func)
if len(function_cache) >= 500: # clear half of cache if full
for k in list(function_cache)[::2]:
del function_cache[k]
function_cache[func] = result
return result
except TypeError: # not hashable
return _normalize_function(func)
def _normalize_function(func):
if isinstance(func, curry):
func = func._partial
if isinstance(func, Compose):
first = getattr(func, 'first', None)
funcs = reversed((first,) + func.funcs) if first else func.funcs
return tuple(normalize_function(f) for f in funcs)
elif isinstance(func, partial):
kws = tuple(sorted(func.keywords.items())) if func.keywords else ()
return (normalize_function(func.func), func.args, kws)
else:
try:
result = pickle.dumps(func, protocol=0)
if b'__main__' not in result: # abort on dynamic functions
return result
except:
pass
try:
import cloudpickle
return cloudpickle.dumps(func, protocol=0)
except:
return str(func)
normalize_token = Dispatch()
normalize_token.register((int, float, str, unicode, bytes, type(None), type,
slice, complex),
identity)
@normalize_token.register(dict)
def normalize_dict(d):
return normalize_token(sorted(d.items(), key=str))
@normalize_token.register(OrderedDict)
def normalize_ordered_dict(d):
return type(d).__name__, normalize_token(list(d.items()))
@normalize_token.register(set)
def normalize_set(s):
return normalize_token(sorted(s, key=str))
@normalize_token.register((tuple, list))
def normalize_seq(seq):
return type(seq).__name__, list(map(normalize_token, seq))
@normalize_token.register(object)
def normalize_object(o):
if callable(o):
return normalize_function(o)
else:
return uuid.uuid4().hex
@normalize_token.register(Base)
def normalize_base(b):
return type(b).__name__, b.key
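# Illustrative sketch (the class below is hypothetical, not part of dask):
# registering a normalizer makes tokenize() deterministic for a custom type
# instead of falling back to the uuid-based normalize_object above.
class _ExampleThing(object):
    def __init__(self, name, payload):
        self.name = name
        self.payload = payload
@normalize_token.register(_ExampleThing)
def _normalize_example_thing(obj):
    # the token depends only on stable fields, never on object identity
    return type(obj).__name__, obj.name, normalize_token(obj.payload)
# tokenize(_ExampleThing('a', [1, 2])) == tokenize(_ExampleThing('a', [1, 2]))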
@normalize_token.register_lazy("pandas")
def register_pandas():
import pandas as pd
@normalize_token.register(pd.Index)
def normalize_index(ind):
return [ind.name, normalize_token(ind.values)]
@normalize_token.register(pd.Categorical)
def normalize_categorical(cat):
return [normalize_token(cat.codes),
normalize_token(cat.categories),
cat.ordered]
@normalize_token.register(pd.Series)
def normalize_series(s):
return [s.name, s.dtype,
normalize_token(s._data.blocks[0].values),
normalize_token(s.index)]
@normalize_token.register(pd.DataFrame)
def normalize_dataframe(df):
data = [block.values for block in df._data.blocks]
data += [df.columns, df.index]
return list(map(normalize_token, data))
@normalize_token.register_lazy("numpy")
def register_numpy():
import numpy as np
@normalize_token.register(np.ndarray)
def normalize_array(x):
if not x.shape:
return (str(x), x.dtype)
if hasattr(x, 'mode') and getattr(x, 'filename', None):
if hasattr(x.base, 'ctypes'):
offset = (x.ctypes.get_as_parameter().value -
x.base.ctypes.get_as_parameter().value)
else:
                offset = 0  # root memmaps have mmap object as base
return (x.filename, os.path.getmtime(x.filename), x.dtype,
x.shape, x.strides, offset)
if x.dtype.hasobject:
try:
data = md5('-'.join(x.flat).encode('utf-8')).hexdigest()
except TypeError:
data = md5(b'-'.join([unicode(item).encode('utf-8') for item in
x.flat])).hexdigest()
else:
try:
data = md5(x.ravel().view('i1').data).hexdigest()
except (BufferError, AttributeError, ValueError):
data = md5(x.copy().ravel().view('i1').data).hexdigest()
return (data, x.dtype, x.shape, x.strides)
normalize_token.register(np.dtype, repr)
normalize_token.register(np.generic, repr)
@normalize_token.register(np.ufunc)
def normalize_ufunc(x):
try:
name = x.__name__
if getattr(np, name) is x:
return 'np.' + name
except:
return normalize_function(x)
def tokenize(*args, **kwargs):
""" Deterministic token
>>> tokenize([1, 2, '3'])
'7d6a880cd9ec03506eee6973ff551339'
>>> tokenize('Hello') == tokenize('Hello')
True
"""
if kwargs:
args = args + (kwargs,)
return md5(str(tuple(map(normalize_token, args))).encode()).hexdigest()
def dont_optimize(dsk, keys):
return dsk
def optimization_function(obj):
if isinstance(obj, type):
cls = obj
else:
cls = type(obj)
name = cls.__name__.lower() + '_optimize' # dask.set_options(array_optimize=foo)
if name in _globals:
return _globals[name] or dont_optimize
try:
return cls._optimize
except AttributeError:
return dont_optimize
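# Illustrative sketch (hypothetical collection classes, not part of dask),
# assuming no '<classname>_optimize' override has been set via set_options:
# the lookup falls back to the collection's own _optimize, and finally to
# dont_optimize when the class defines none.
def _example_optimization_lookup():
    def _my_optimize(dsk, keys, **kwargs):
        return dsk
    class _FakeCollection(Base):
        _optimize = staticmethod(_my_optimize)
    class _BareCollection(Base):
        pass
    assert optimization_function(_FakeCollection) is _my_optimize
    assert optimization_function(_BareCollection) is dont_optimize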
def collections_to_dsk(collections, optimize_graph=True, **kwargs):
"""
Convert many collections into a single dask graph, after optimization
"""
optimizations = (kwargs.pop('optimizations', None) or
_globals.get('optimizations', []))
if optimize_graph:
groups = groupby(optimization_function, collections)
groups = {opt: _extract_graph_and_keys(val)
for opt, val in groups.items()}
for opt in optimizations:
groups = {k: [opt(ensure_dict(dsk), keys), keys]
for k, (dsk, keys) in groups.items()}
dsk = merge([opt(dsk, keys, **kwargs)
for opt, (dsk, keys) in groups.items()])
else:
dsk = ensure_dict(sharedict.merge(*[c.dask for c in collections]))
return dsk
def _extract_graph_and_keys(vals):
"""Given a list of dask vals, return a single graph and a list of keys such
    that ``get(dsk, keys)`` is equivalent to ``[v.compute() for v in vals]``."""
dsk = {}
keys = []
for v in vals:
d = v.dask
if type(d) is ShareDict:
for dd in d.dicts.values():
dsk.update(dd)
else:
dsk.update(v.dask)
keys.append(v._keys())
return dsk, keys
def redict_collection(c, dsk):
cc = copy.copy(c)
cc.dask = dsk
return cc
def persist(*args, **kwargs):
""" Persist multiple Dask collections into memory
This turns lazy Dask collections into Dask collections with the same
metadata, but now with their results fully computed or actively computing
in the background.
For example a lazy dask.array built up from many lazy calls will now be a
dask.array of the same shape, dtype, chunks, etc., but now with all of
those previously lazy tasks either computed in memory as many small NumPy
arrays (in the single-machine case) or asynchronously running in the
background on a cluster (in the distributed case).
This function operates differently if a ``dask.distributed.Client`` exists
and is connected to a distributed scheduler. In this case this function
will return as soon as the task graph has been submitted to the cluster,
but before the computations have completed. Computations will continue
asynchronously in the background. When using this function with the single
machine scheduler it blocks until the computations have finished.
When using Dask on a single machine you should ensure that the dataset fits
entirely within memory.
Examples
--------
>>> df = dd.read_csv('/path/to/*.csv') # doctest: +SKIP
>>> df = df[df.name == 'Alice'] # doctest: +SKIP
>>> df['in-debt'] = df.balance < 0 # doctest: +SKIP
>>> df = df.persist() # triggers computation # doctest: +SKIP
>>> df.value().min() # future computations are now fast # doctest: +SKIP
-10
>>> df.value().max() # doctest: +SKIP
100
>>> from dask import persist # use persist function on multiple collections
>>> a, b = persist(a, b) # doctest: +SKIP
Parameters
----------
*args: Dask collections
get : callable, optional
A scheduler ``get`` function to use. If not provided, the default
is to check the global settings first, and then fall back to
the collection defaults.
optimize_graph : bool, optional
If True [default], the graph is optimized before computation.
Otherwise the graph is run as is. This can be useful for debugging.
**kwargs
Extra keywords to forward to the scheduler ``get`` function.
Returns
-------
New dask collections backed by in-memory data
"""
collections = [a for a in args if isinstance(a, Base)]
if not collections:
return args
get = kwargs.pop('get', None) or _globals['get']
if inspect.ismethod(get):
try:
from distributed.client import default_client
except ImportError:
pass
else:
try:
client = default_client()
except ValueError:
pass
else:
if client.get == _globals['get']:
collections = client.persist(collections, **kwargs)
if isinstance(collections, list): # distributed is inconsistent here
collections = tuple(collections)
else:
collections = (collections,)
results_iter = iter(collections)
return tuple(a if not isinstance(a, Base)
else next(results_iter)
for a in args)
optimize_graph = kwargs.pop('optimize_graph', True)
if not get:
get = collections[0]._default_get
if not all(a._default_get == get for a in collections):
raise ValueError("Compute called on multiple collections with "
"differing default schedulers. Please specify a "
"scheduler `get` function using either "
"the `get` kwarg or globally with `set_options`.")
dsk = collections_to_dsk(collections, optimize_graph, **kwargs)
keys = list(flatten([var._keys() for var in collections]))
results = get(dsk, keys, **kwargs)
d = dict(zip(keys, results))
result = [redict_collection(c, {k: d[k]
for k in flatten(c._keys())})
for c in collections]
results_iter = iter(result)
return tuple(a if not isinstance(a, Base)
else next(results_iter)
for a in args)
if PY3:
Base.persist.__doc__ = persist.__doc__
| {
"repo_name": "mraspaud/dask",
"path": "dask/base.py",
"copies": "1",
"size": "20309",
"license": "bsd-3-clause",
"hash": -7147707831151094000,
"line_mean": 32.9048414023,
"line_max": 89,
"alpha_frac": 0.6000787828,
"autogenerated": false,
"ratio": 4.163386633866339,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5263465416666339,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from collections import OrderedDict
from enum import Enum
import yaml
import json
from attr._make import fields
try:
from functools import singledispatch
except ImportError:
from singledispatch import singledispatch
@singledispatch
def to_dict(obj, **kwargs):
"""
Convert an object into dictionary. Uses singledispatch to allow for
clean extensions for custom class types.
Reference: https://pypi.python.org/pypi/singledispatch
:param obj: object instance
:param kwargs: keyword arguments such as suppress_private_attr,
suppress_empty_values, dict_factory
:return: converted dictionary.
"""
# if is_related, then iterate attrs.
if is_model(obj.__class__):
return related_obj_to_dict(obj, **kwargs)
# else, return obj directly. register a custom to_dict if you need to!
# reference: https://pypi.python.org/pypi/singledispatch
else:
return obj
def related_obj_to_dict(obj, **kwargs):
""" Covert a known related object to a dictionary. """
# Explicitly discard formatter kwarg, should not be cascaded down.
kwargs.pop('formatter', None)
# If True, remove fields that start with an underscore (e.g. _secret)
suppress_private_attr = kwargs.get("suppress_private_attr", False)
# if True, don't store fields with None values into dictionary.
suppress_empty_values = kwargs.get("suppress_empty_values", False)
# get list of attrs fields
attrs = fields(obj.__class__)
# instantiate return dict, use OrderedDict type by default
return_dict = kwargs.get("dict_factory", OrderedDict)()
for a in attrs:
# skip if private attr and flag tells you to skip
if suppress_private_attr and a.name.startswith("_"):
continue
metadata = a.metadata or {}
# formatter is a related-specific `attrs` meta field
# see fields.DateField
formatter = metadata.get('formatter')
# get value and call to_dict on it, passing the kwargs/formatter
value = getattr(obj, a.name)
value = to_dict(value, formatter=formatter, **kwargs)
# check flag, skip None values
if suppress_empty_values and value is None:
continue
# field name can be overridden by the metadata field
key_name = a.metadata.get('key') or a.name
# store converted / formatted value into return dictionary
return_dict[key_name] = value
return return_dict
def to_model(cls, value):
"""
Coerce a value into a model object based on a class-type (cls).
:param cls: class type to coerce into
:param value: value to be coerced
:return: original value or coerced value (value')
"""
if isinstance(value, cls) or value is None:
pass # skip if right type or value is None
elif issubclass(cls, Enum):
value = cls(value)
elif is_model(cls) and isinstance(value, dict):
value = convert_key_to_attr_names(cls, value)
value = cls(**value)
else:
value = cls(value)
return value
def convert_key_to_attr_names(cls, original):
""" convert key names to their corresponding attribute names """
attrs = fields(cls)
updated = {}
keys_pulled = set()
for a in attrs:
key_name = a.metadata.get('key') or a.name
if key_name in original:
updated[a.name] = original.get(key_name)
keys_pulled.add(key_name)
if getattr(cls, '__related_strict__', False):
extra = set(original.keys()) - keys_pulled
if len(extra):
raise ValueError("Extra keys (strict mode): {}".format(extra))
return updated
def is_model(cls):
"""
Check whether *cls* is a class with ``attrs`` attributes.
:param type cls: Class to introspect.
:raise TypeError: If *cls* is not a class.
:rtype: :class:`bool`
"""
return getattr(cls, "__attrs_attrs__", None) is not None
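# Illustrative sketch (hypothetical model, not part of this module): any class
# generated by ``attrs`` carries __attrs_attrs__ and therefore counts as a
# model for to_dict/to_model purposes.
import attr
@attr.s
class _ExamplePoint(object):
    x = attr.ib()
    y = attr.ib()
def _example_is_model():
    assert is_model(_ExamplePoint)
    assert not is_model(int)
    assert to_dict(_ExamplePoint(1, 2)) == OrderedDict([('x', 1), ('y', 2)])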
def to_yaml(obj, stream=None, dumper_cls=yaml.Dumper, default_flow_style=False,
**kwargs):
"""
Serialize a Python object into a YAML stream with OrderedDict and
default_flow_style defaulted to False.
If stream is None, return the produced string instead.
OrderedDict reference: http://stackoverflow.com/a/21912744
default_flow_style reference: http://stackoverflow.com/a/18210750
    :param obj: python object to be serialized
    :param stream: stream to serialize to
    :param dumper_cls: base Dumper class to extend.
:param kwargs: arguments to pass to to_dict
:return: stream if provided, string if stream is None
"""
class OrderedDumper(dumper_cls):
pass
def dict_representer(dumper, data):
return dumper.represent_mapping(
yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG,
data.items())
OrderedDumper.add_representer(OrderedDict, dict_representer)
obj_dict = to_dict(obj, **kwargs)
return yaml.dump(obj_dict, stream, OrderedDumper,
default_flow_style=default_flow_style)
def from_yaml(stream, cls=None, loader_cls=yaml.Loader,
object_pairs_hook=OrderedDict, **extras):
"""
Convert a YAML stream into a class via the OrderedLoader class.
"""
class OrderedLoader(loader_cls):
pass
def construct_mapping(loader, node):
loader.flatten_mapping(node)
return object_pairs_hook(loader.construct_pairs(node))
OrderedLoader.add_constructor(
yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG,
construct_mapping)
yaml_dict = yaml.load(stream, OrderedLoader) or {}
yaml_dict.update(extras)
return cls(**yaml_dict) if cls else yaml_dict
def to_json(obj, indent=4, sort_keys=True, **kwargs):
"""
:param obj: object to convert to dictionary and then output to json
:param indent: indent json by number of spaces
:param sort_keys: sort json output by key if true
:param kwargs: arguments to pass to to_dict
:return: json string
"""
obj_dict = to_dict(obj, **kwargs)
return json.dumps(obj_dict, indent=indent, sort_keys=sort_keys)
def from_json(stream, cls=None, object_pairs_hook=OrderedDict, **extras):
"""
Convert a JSON string or stream into specified class.
"""
stream = stream.read() if hasattr(stream, 'read') else stream
json_dict = json.loads(stream, object_pairs_hook=object_pairs_hook)
if extras:
json_dict.update(extras) # pragma: no cover
return to_model(cls, json_dict) if cls else json_dict
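# Illustrative round trip (hypothetical data, reusing the _ExamplePoint model
# sketched above): keys in the JSON map back onto attribute names via
# convert_key_to_attr_names before the model is instantiated.
def _example_json_round_trip():
    original = _ExamplePoint(x=1, y=2)
    serialized = to_json(original)              # indent=4, keys sorted
    assert from_json(serialized, _ExamplePoint) == original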
| {
"repo_name": "genomoncology/related",
"path": "src/related/functions.py",
"copies": "1",
"size": "6637",
"license": "mit",
"hash": -5423123351295688000,
"line_mean": 29.1681818182,
"line_max": 79,
"alpha_frac": 0.6594847069,
"autogenerated": false,
"ratio": 3.9814037192561487,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5140888426156148,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from collections import OrderedDict
from functools import partial
from hashlib import md5
from operator import attrgetter
import pickle
import os
import uuid
from toolz import merge, groupby, curry, identity
from toolz.functoolz import Compose
from .compatibility import bind_method, unicode
from .context import _globals
from .utils import Dispatch
__all__ = ("Base", "compute", "normalize_token", "tokenize", "visualize")
class Base(object):
"""Base class for dask collections"""
def visualize(self, filename='mydask', format=None, optimize_graph=False,
**kwargs):
"""
Render the computation of this object's task graph using graphviz.
Requires ``graphviz`` to be installed.
Parameters
----------
filename : str or None, optional
The name (without an extension) of the file to write to disk. If
`filename` is None, no file will be written, and we communicate
with dot using only pipes.
format : {'png', 'pdf', 'dot', 'svg', 'jpeg', 'jpg'}, optional
Format in which to write output file. Default is 'png'.
optimize_graph : bool, optional
If True, the graph is optimized before rendering. Otherwise,
the graph is displayed as is. Default is False.
**kwargs
Additional keyword arguments to forward to ``to_graphviz``.
Returns
-------
        result : IPython.display.Image, IPython.display.SVG, or None
See dask.dot.dot_graph for more information.
See also
--------
dask.base.visualize
dask.dot.dot_graph
Notes
-----
For more information on optimization see here:
http://dask.pydata.org/en/latest/optimize.html
"""
return visualize(self, filename=filename, format=format,
optimize_graph=optimize_graph, **kwargs)
def compute(self, **kwargs):
"""Compute several dask collections at once.
Parameters
----------
get : callable, optional
A scheduler ``get`` function to use. If not provided, the default
is to check the global settings first, and then fall back to
the collection defaults.
optimize_graph : bool, optional
If True [default], the graph is optimized before computation.
Otherwise the graph is run as is. This can be useful for debugging.
kwargs
Extra keywords to forward to the scheduler ``get`` function.
"""
return compute(self, **kwargs)[0]
@classmethod
def _get(cls, dsk, keys, get=None, **kwargs):
get = get or _globals['get'] or cls._default_get
dsk2 = cls._optimize(dsk, keys, **kwargs)
return get(dsk2, keys, **kwargs)
@classmethod
def _bind_operator(cls, op):
""" bind operator to this class """
name = op.__name__
if name.endswith('_'):
# for and_ and or_
name = name[:-1]
elif name == 'inv':
name = 'invert'
meth = '__{0}__'.format(name)
if name in ('abs', 'invert', 'neg', 'pos'):
bind_method(cls, meth, cls._get_unary_operator(op))
else:
bind_method(cls, meth, cls._get_binary_operator(op))
if name in ('eq', 'gt', 'ge', 'lt', 'le', 'ne', 'getitem'):
return
rmeth = '__r{0}__'.format(name)
bind_method(cls, rmeth, cls._get_binary_operator(op, inv=True))
@classmethod
def _get_unary_operator(cls, op):
""" Must return a method used by unary operator """
raise NotImplementedError
@classmethod
def _get_binary_operator(cls, op, inv=False):
""" Must return a method used by binary operator """
raise NotImplementedError
def _extract_graph_and_keys(vals):
"""Given a list of dask vals, return a single graph and a list of keys such
    that ``get(dsk, keys)`` is equivalent to ``[v.compute() for v in vals]``."""
dsk = {}
keys = []
for v in vals:
# Optimization to avoid merging dictionaries in Delayed values. Reduces
# memory usage for large graphs.
if hasattr(v, '_dasks'):
for d in v._dasks:
dsk.update(d)
else:
dsk.update(v.dask)
keys.append(v._keys())
return dsk, keys
def compute(*args, **kwargs):
"""Compute several dask collections at once.
Parameters
----------
args : object
Any number of objects. If the object is a dask collection, it's
computed and the result is returned. Otherwise it's passed through
unchanged.
get : callable, optional
A scheduler ``get`` function to use. If not provided, the default is
to check the global settings first, and then fall back to defaults for
the collections.
optimize_graph : bool, optional
If True [default], the optimizations for each collection are applied
before computation. Otherwise the graph is run as is. This can be
useful for debugging.
kwargs
Extra keywords to forward to the scheduler ``get`` function.
Examples
--------
>>> import dask.array as da
>>> a = da.arange(10, chunks=2).sum()
>>> b = da.arange(10, chunks=2).mean()
>>> compute(a, b)
(45, 4.5)
"""
variables = [a for a in args if isinstance(a, Base)]
if not variables:
return args
get = kwargs.pop('get', None) or _globals['get']
optimizations = (kwargs.pop('optimizations', None) or
_globals.get('optimizations', []))
if not get:
get = variables[0]._default_get
if not all(a._default_get == get for a in variables):
raise ValueError("Compute called on multiple collections with "
"differing default schedulers. Please specify a "
"scheduler `get` function using either "
"the `get` kwarg or globally with `set_options`.")
if kwargs.get('optimize_graph', True):
groups = groupby(attrgetter('_optimize'), variables)
groups = {opt: _extract_graph_and_keys(val)
for opt, val in groups.items()}
for opt in optimizations:
groups = {k: [opt(dsk, keys), keys]
for k, (dsk, keys) in groups.items()}
dsk = merge([opt(dsk, keys, **kwargs)
for opt, (dsk, keys) in groups.items()])
keys = [var._keys() for var in variables]
else:
dsk, keys = _extract_graph_and_keys(variables)
results = get(dsk, keys, **kwargs)
results_iter = iter(results)
return tuple(a if not isinstance(a, Base)
else a._finalize(next(results_iter))
for a in args)
def visualize(*args, **kwargs):
"""
Visualize several dask graphs at once.
Requires ``graphviz`` to be installed. All options that are not the dask
graph(s) should be passed as keyword arguments.
Parameters
----------
dsk : dict(s) or collection(s)
The dask graph(s) to visualize.
filename : str or None, optional
The name (without an extension) of the file to write to disk. If
`filename` is None, no file will be written, and we communicate
with dot using only pipes.
format : {'png', 'pdf', 'dot', 'svg', 'jpeg', 'jpg'}, optional
Format in which to write output file. Default is 'png'.
optimize_graph : bool, optional
If True, the graph is optimized before rendering. Otherwise,
the graph is displayed as is. Default is False.
**kwargs
Additional keyword arguments to forward to ``to_graphviz``.
Returns
-------
    result : IPython.display.Image, IPython.display.SVG, or None
See dask.dot.dot_graph for more information.
See also
--------
dask.dot.dot_graph
Notes
-----
For more information on optimization see here:
http://dask.pydata.org/en/latest/optimize.html
"""
dsks = [arg for arg in args if isinstance(arg, dict)]
args = [arg for arg in args if isinstance(arg, Base)]
filename = kwargs.pop('filename', 'mydask')
optimize_graph = kwargs.pop('optimize_graph', False)
from dask.dot import dot_graph
if optimize_graph:
dsks.extend([arg._optimize(arg.dask, arg._keys()) for arg in args])
else:
dsks.extend([arg.dask for arg in args])
dsk = merge(dsks)
return dot_graph(dsk, filename=filename, **kwargs)
def normalize_function(func):
if isinstance(func, curry):
func = func._partial
if isinstance(func, Compose):
first = getattr(func, 'first', None)
funcs = reversed((first,) + func.funcs) if first else func.funcs
return tuple(normalize_function(f) for f in funcs)
elif isinstance(func, partial):
kws = tuple(sorted(func.keywords.items())) if func.keywords else ()
return (normalize_function(func.func), func.args, kws)
else:
try:
result = pickle.dumps(func, protocol=0)
if b'__main__' not in result: # abort on dynamic functions
return result
except:
pass
try:
import cloudpickle
return cloudpickle.dumps(func, protocol=0)
except:
return str(func)
normalize_token = Dispatch()
normalize_token.register((int, float, str, unicode, bytes, type(None), type,
slice),
identity)
@normalize_token.register(dict)
def normalize_dict(d):
return normalize_token(sorted(d.items(), key=str))
@normalize_token.register(OrderedDict)
def normalize_ordered_dict(d):
return type(d).__name__, normalize_token(list(d.items()))
@normalize_token.register((tuple, list, set))
def normalize_seq(seq):
return type(seq).__name__, list(map(normalize_token, seq))
@normalize_token.register(object)
def normalize_object(o):
if callable(o):
return normalize_function(o)
else:
return uuid.uuid4().hex
@normalize_token.register(Base)
def normalize_base(b):
return type(b).__name__, b.key
@normalize_token.register_lazy("pandas")
def register_pandas():
import pandas as pd
@normalize_token.register(pd.Index)
def normalize_index(ind):
return [ind.name, normalize_token(ind.values)]
@normalize_token.register(pd.Categorical)
def normalize_categorical(cat):
return [normalize_token(cat.codes),
normalize_token(cat.categories),
cat.ordered]
@normalize_token.register(pd.Series)
def normalize_series(s):
return [s.name, s.dtype,
normalize_token(s._data.blocks[0].values),
normalize_token(s.index)]
@normalize_token.register(pd.DataFrame)
def normalize_dataframe(df):
data = [block.values for block in df._data.blocks]
data += [df.columns, df.index]
return list(map(normalize_token, data))
@normalize_token.register_lazy("numpy")
def register_numpy():
import numpy as np
@normalize_token.register(np.ndarray)
def normalize_array(x):
if not x.shape:
return (str(x), x.dtype)
if hasattr(x, 'mode') and getattr(x, 'filename', None):
if hasattr(x.base, 'ctypes'):
offset = (x.ctypes.get_as_parameter().value -
x.base.ctypes.get_as_parameter().value)
else:
                offset = 0  # root memmaps have mmap object as base
return (x.filename, os.path.getmtime(x.filename), x.dtype,
x.shape, x.strides, offset)
if x.dtype.hasobject:
try:
data = md5('-'.join(x.flat).encode('utf-8')).hexdigest()
except TypeError:
data = md5(b'-'.join([str(item).encode() for item in x.flat])).hexdigest()
else:
try:
data = md5(x.ravel().view('i1').data).hexdigest()
except (BufferError, AttributeError, ValueError):
data = md5(x.copy().ravel().view('i1').data).hexdigest()
return (data, x.dtype, x.shape, x.strides)
normalize_token.register(np.dtype, repr)
normalize_token.register(np.generic, repr)
def tokenize(*args, **kwargs):
""" Deterministic token
>>> tokenize([1, 2, '3'])
'7d6a880cd9ec03506eee6973ff551339'
>>> tokenize('Hello') == tokenize('Hello')
True
"""
if kwargs:
args = args + (kwargs,)
return md5(str(tuple(map(normalize_token, args))).encode()).hexdigest()
| {
"repo_name": "gameduell/dask",
"path": "dask/base.py",
"copies": "2",
"size": "12768",
"license": "bsd-3-clause",
"hash": -4224822080165684700,
"line_mean": 32.1636363636,
"line_max": 90,
"alpha_frac": 0.5940632832,
"autogenerated": false,
"ratio": 4.043065231158962,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5637128514358962,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from collections import OrderedDict
from functools import partial
from pymel.core import dt, geometryConstraint, normalConstraint, pointConstraint, spaceLocator, xform
from .... import core
from .... import nodeApi
from ..cardRigging import MetaControl, ParamInfo, OutputControls, colorParity
from .. import controllerShape
from .. import log
from .. import space
from . import _util as util
from .. import node
def getUpVectors(j):
# HACK!!!! Needs to work in surfaceFollow
return dt.Vector(0, 0, 1), dt.Vector(0, 0, 1)
@util.adds()
@util.defaultspec( {'shape': 'box', 'size': 10, 'color': 'blue 0.22'},
manual={'shape': 'pin', 'size': 3, 'color': 'green 0.22', 'align': 'nx'},
offset={'shape': 'band', 'size': 5, 'color': 'green 0.22', 'align': 'nx'}
)
def buildSurfaceFollow(joints, groupOrigin, surface=None, controlSpec={}):
groupOrigin = dt.Vector(groupOrigin)
container = util.parentGroup(joints[0])
container.setParent( node.mainGroup() )
mainCtrl = controllerShape.build(
util.trimName(joints[0].getParent()) + 'Surface_ctrl',
controlSpec['main'],
type=controllerShape.ControlType.TRANSLATE )
mainCtrl = nodeApi.RigController.convert(mainCtrl)
mainCtrl.setParent(container)
xform(mainCtrl, ws=True, t=groupOrigin)
core.dagObj.lockScale(mainCtrl)
core.dagObj.zero(mainCtrl)
subControls = []
locs = []
offsets = []
for i, j in enumerate(joints):
loc = spaceLocator()
locs.append( loc )
core.dagObj.matchTo(loc, j)
geometryConstraint(surface, loc)
objUp, worldObjUp = getUpVectors(j)
normalConstraint(surface, loc,
wuo=mainCtrl,
wut='objectrotation',
upVector=objUp,
worldUpVector=worldObjUp)
offsetCtrl = controllerShape.build( util.trimName(j) + 'Offset_ctrl',
controlSpec['offset'],
type=controllerShape.ControlType.TRANSLATE )
core.dagObj.matchTo(offsetCtrl, loc)
offsets.append( offsetCtrl )
offsetCtrl.setParent(loc)
core.dagObj.zero(offsetCtrl)
subCtrl = controllerShape.build( util.trimName(j) + '_ctrl',
controlSpec['manual'],
type=controllerShape.ControlType.TRANSLATE )
subControls.append(subCtrl)
core.dagObj.matchTo(subCtrl, loc)
subCtrl.setParent(mainCtrl)
core.dagObj.zero(subCtrl)
pointConstraint(subCtrl, loc)
core.dagObj.lockRot(subCtrl)
core.dagObj.lockScale(subCtrl)
core.dagObj.lockScale(offsetCtrl)
loc.setParent(subCtrl)
space.add( offsetCtrl, loc, spaceName='surface')
mainCtrl.subControl[str(i)] = subCtrl
mainCtrl.subControl[str(i) + '_offset'] = offsetCtrl
constraints = util.constrainAtoB(joints, offsets)
mainCtrl.container = container
return mainCtrl, constraints
class SurfaceFollow(MetaControl):
''' Special controller providing translating bones simulating squash and stretch. '''
#displayInUI = False
ik_ = 'pdil.tool.fossil.rigging.surfaceFollow.buildSurfaceFollow'
ikInput = OrderedDict( [
('surface', ParamInfo('Mesh', 'The surface to follow', ParamInfo.NODE_0)),
#('rangeMin', ParamInfo( 'Min Range', 'Lower bounds of the keyable attr.', ParamInfo.FLOAT, -5.0)),
#('rangeMax', ParamInfo( 'Max Range', 'Upper bounds of the keyable attr.', ParamInfo.FLOAT, 5.0)),
#('scaleMin', ParamInfo( 'Shrink Value', 'When the attr is at the lower bounds, scale it to this amount.', ParamInfo.FLOAT, .5)),
#('scaleMax', ParamInfo( 'Expand Value', 'When the attr is at the upper bounds, scale it to this amount.', ParamInfo.FLOAT, 2)),
] )
#orientAsParent=True, min=0.5, max=1.5
@classmethod
def build(cls, card):
'''
Custom build that uses all the joints, except the last, which is used
as a virtual center/master control for all surface following joints.
'''
assert len(card.joints) > 2
pivotPoint = xform(card.joints[-1], q=True, ws=True, t=True)
joints = [j.real for j in card.joints[:-1]]
ikControlSpec = cls.controlOverrides(card, 'ik')
def _buildSide( joints, pivotPoint, isMirroredSide, side=None ):
log.Rotation.check(joints, True)
if side == 'left':
sideAlteration = partial( colorParity, 'L' )
elif side == 'right':
sideAlteration = partial( colorParity, 'R' )
else:
sideAlteration = lambda **kwargs: kwargs # noqa
kwargs = cls.readIkKwargs(card, isMirroredSide, sideAlteration)
kwargs.update( cls.ikArgs )
kwargs['controlSpec'].update( cls.ikControllerOptions )
kwargs.update( sideAlteration(**ikControlSpec) )
print('ARGS', joints, pivotPoint, kwargs)
ikCtrl, ikConstraints = cls.ik( joints, pivotPoint, **kwargs )
return OutputControls(None, ikCtrl)
suffix = card.findSuffix()
#if not util.canMirror( card.start() ) or card.isAsymmetric():
if not suffix or card.isAsymmetric():
#suffix = card.findSuffix()
if suffix:
ctrls = _buildSide(joints, pivotPoint, False, suffix)
else:
ctrls = _buildSide(joints, pivotPoint, False)
card.outputCenter.ik = ctrls.ik
else:
ctrls = _buildSide(joints, pivotPoint, False, 'left')
card.outputLeft.ik = ctrls.ik
pivotPoint[0] *= -1
joints = [j.realMirror for j in card.joints[:-1]]
ctrls = _buildSide(joints, pivotPoint, True, 'right' )
card.outputRight.ik = ctrls.ik
"""
@staticmethod
def getExtraControls(ctrl):
'''
Returns the objs the squasher controls follow, which have the set driven keys.
Cheesy at the moment because it's just the list of children (alphabetized).
'''
squashers = listRelatives(ctrl, type='transform')
return sorted( set(squashers) )
@classmethod
def saveState(cls, card):
sdkInfo = {}
for ctrl, side, kinematicType in card.getMainControls():
if kinematicType == 'ik':
sdkInfo[side] = [ lib.anim.findSetDrivenKeys(o) for o in cls.getExtraControls(ctrl) ]
state = card.rigState
state['squasherSDK'] = sdkInfo
card.rigState = state
@classmethod
def restoreState(cls, card):
state = card.rigState
if 'squasherSDK' not in state:
return
for ctrl, side, kinematicType in card.getMainControls():
if kinematicType == 'ik':
if side in state['squasherSDK']:
curves = state['squasherSDK'][side]
squashers = cls.getExtraControls(ctrl)
for squasher, crv in zip(squashers, curves):
lib.anim.applySetDrivenKeys(squasher, crv)
""" | {
"repo_name": "patcorwin/fossil",
"path": "pdil/tool/fossil/rigging/surfaceFollow.py",
"copies": "1",
"size": "7526",
"license": "bsd-3-clause",
"hash": -937649686206291800,
"line_mean": 35.014354067,
"line_max": 137,
"alpha_frac": 0.5876959872,
"autogenerated": false,
"ratio": 3.9320794148380354,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5019775402038036,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from collections import OrderedDict
from itertools import repeat
import dask
import toolz as t
import daskfunk.utils as u
from daskfunk.compatibility import getargspec
_UNSPECIFIED = '::unspecified::'
_AMBIGUOUS = '::ambiguous::'
def _is_required(default):
return default == _UNSPECIFIED or default == _AMBIGUOUS
def _func_param_info(argspec):
params = argspec.args
defaults = argspec.defaults or []
start_default_ix = -max(len(defaults), 1) - 1
values = [_UNSPECIFIED] * (len(params) - len(defaults)) + \
list(defaults[start_default_ix:])
return OrderedDict(zip(params, values))
def _is_curry_func(f):
"""
Checks if f is a toolz or cytoolz function by inspecting the available attributes.
Avoids explicit type checking to accommodate all versions of the curry fn.
"""
return hasattr(f, 'func') and hasattr(f, 'args') and hasattr(f, 'keywords')
def _param_info(f):
if _is_curry_func(f):
argspec = getargspec(f.func)
num_args = len(f.args)
args_to_remove = argspec.args[0:num_args]
if f.keywords:
args_to_remove += list(f.keywords.keys())
base = _func_param_info(argspec)
return t.dissoc(base, *args_to_remove)
return(_func_param_info(getargspec(f)))
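# Illustrative sketch (hypothetical functions, not from this module): required
# parameters map to the _UNSPECIFIED sentinel, defaulted ones keep their
# default, and arguments already bound by a curry are dropped.
def _example_param_info():
    def f(a, b, c=3):
        return (a, b, c)
    assert _param_info(f) == OrderedDict([('a', _UNSPECIFIED),
                                          ('b', _UNSPECIFIED),
                                          ('c', 3)])
    assert _param_info(t.curry(f)(1)) == {'b': _UNSPECIFIED, 'c': 3}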
def _func_name(func):
if hasattr(func, 'func'):
return(_func_name(func.func))
else:
return func.__name__
def _partial_base_fn(partial_fn):
fn = partial_fn.func
if '__module__' not in dir(fn):
# for some reason the curry decorator nests the actual function
# metadata one level deeper
fn = fn.func
return fn
def _partial_inputs(partial_fn):
pargs = partial_fn.args
pkargs = partial_fn.keywords
f = _partial_base_fn(partial_fn)
spec = getargspec(f)
num_named_args = len(spec.args)
unnamed_args = dict(zip(spec.args, pargs[0:num_named_args]))
varargs = pargs[num_named_args:]
kargs = t.merge(pkargs, unnamed_args)
return varargs, kargs
def compile(fn_graph, get=dask.get):
fn_param_info = t.valmap(_param_info, fn_graph)
global_param_info = {}
for param_info in fn_param_info.values():
for kw, value in param_info.items():
if kw in global_param_info and global_param_info[kw] != value:
global_param_info[kw] = _AMBIGUOUS
else:
global_param_info[kw] = value
computed_args = set(fn_graph.keys())
required_params, defaulted = u.split_keys_by_val(_is_required,
global_param_info)
required_params = required_params - computed_args
all_params = required_params.union(defaulted)
default_args = u.select_keys(global_param_info, defaulted)
def to_task(res_key, param_info):
fn = fn_graph[res_key]
dask_args = tuple(param_info.keys())
if _is_curry_func(fn):
# wrap the fn but persist the args, and kargs on it
args = tuple([default_args.get(p, p) for p in param_info.keys()])
set_varargs, set_kargs = _partial_inputs(fn)
def wrapper(*args):
kwargs = t.merge(set_kargs, dict(zip(param_info.keys(), args)))
return fn(*set_varargs, **kwargs)
            wrapper.__name__ = _func_name(fn)
# we maintain the curry/partial func info
wrapper.func = _func_name(fn)
wrapper.keywords = fn.keywords
wrapper.args = fn.args
return (wrapper,) + dask_args
return (fn,) + dask_args
base_dask = {k:to_task(k, param_info)
for k, param_info in fn_param_info.items()}
outputs = list(fn_graph.keys())
def funk(get=get, **kargs):
param_keys = set(kargs.keys())
missing_keys = required_params - param_keys
if missing_keys:
raise TypeError(
'missing these keyword arguments: {}'.format(missing_keys))
extra_keys = param_keys - all_params
if extra_keys:
raise TypeError(
'unexpected keyword arguments passed in: {}'.format(extra_keys))
dsk = t.merge(base_dask, default_args, kargs)
res = get(dsk, outputs)
return dict(zip(outputs, res))
funk.required = required_params
funk.defaults = default_args
funk.base_dask = base_dask
funk.full_dask = t.merge(base_dask,
dict(zip(all_params,
repeat(_UNSPECIFIED))))
# TODO: use bolton's FunctionBuilder to set kargs so it has a useful function signature
return funk
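# Illustrative usage sketch (hypothetical functions, not from this module):
# graph keys become outputs, and parameter names become either required
# keyword inputs or edges to other outputs of the compiled funk.
def _example_compile():
    def add(x, y):
        return x + y
    def double(a):
        return a * 2
    funk = compile({'a': add, 'b': double})
    assert funk.required == {'x', 'y'}
    assert funk(x=1, y=2) == {'a': 3, 'b': 6}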
| {
"repo_name": "Savvysherpa/dask-funk",
"path": "daskfunk/core.py",
"copies": "1",
"size": "4692",
"license": "mit",
"hash": -1319164392241059300,
"line_mean": 33.5,
"line_max": 91,
"alpha_frac": 0.6054987212,
"autogenerated": false,
"ratio": 3.5518546555639667,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4657353376763967,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from collections import OrderedDict
import contextlib
import copy
import hashlib
import json
import os
from os.path import isfile, join
import re
import sys
import time
from bs4 import UnicodeDammit
from .conda_interface import iteritems, PY3, text_type
from .conda_interface import md5_file
from .conda_interface import non_x86_linux_machines
from .conda_interface import MatchSpec
from .conda_interface import envs_dirs
from .conda_interface import string_types
from conda_build import exceptions, utils, variants, environ
from conda_build.conda_interface import memoized
from conda_build.features import feature_list
from conda_build.config import Config, get_or_merge_config
from conda_build.utils import (ensure_list, find_recipe, expand_globs, get_installed_packages,
HashableDict, insert_variant_versions)
from conda_build.license_family import ensure_valid_license_family
try:
import yaml
except ImportError:
sys.exit('Error: could not import yaml (required to read meta.yaml '
'files of conda recipes)')
try:
loader = yaml.CLoader
except:
loader = yaml.Loader
on_win = (sys.platform == 'win32')
# arches that don't follow exact names in the subdir need to be mapped here
ARCH_MAP = {'32': 'x86',
'64': 'x86_64'}
NOARCH_TYPES = ('python', 'generic', True)
# we originally matched outputs based on output name. Unfortunately, that
# doesn't work when outputs are templated - we want to match un-rendered
# text, but we have rendered names.
# We overcome that divide by finding the output index in a rendered set of
# outputs, so our names match, then we use that numeric index with this
# regex, which extracts all outputs in order.
# Stop condition is one of 3 things:
# \w at the start of a line (next top-level section)
# \Z (end of file)
# next output, as delineated by "- name" or "- type"
output_re = re.compile(r"^\ +-\ +(?:name|type):.+?(?=^\w|\Z|^\ +-\ +(?:name|type))",
flags=re.M | re.S)
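# Hedged example (not part of the original module; the YAML fragment and the
# _example_* name are illustrative only): what output_re extracts from
# un-rendered outputs text.
def _example_output_re():
    text = ("outputs:\n"
            "  - name: libfoo\n"
            "    requirements:\n"
            "      - bar\n"
            "  - name: foo\n"
            "about:\n")
    # one match per "- name:"/"- type:" block, returned in source order
    return output_re.findall(text)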
numpy_xx_re = re.compile(r'(numpy\s*x\.x)|pin_compatible\([\'\"]numpy.*max_pin=[\'\"]x\.x[\'\"]')
# TODO: there's probably a way to combine these, but I can't figure out how to make the x
# capturing group optional.
numpy_compatible_x_re = re.compile(
r'pin_\w+\([\'\"]numpy[\'\"].*((?<=x_pin=[\'\"])[x\.]*(?=[\'\"]))')
numpy_compatible_re = re.compile(r"pin_\w+\([\'\"]numpy[\'\"]")
# used to avoid recomputing/rescanning recipe contents for used variables
used_vars_cache = {}
def ns_cfg(config):
    # Remember to update the docs if any of this changes
plat = config.host_subdir
d = dict(
linux=plat.startswith('linux-'),
linux32=bool(plat == 'linux-32'),
linux64=bool(plat == 'linux-64'),
arm=plat.startswith('linux-arm'),
osx=plat.startswith('osx-'),
unix=plat.startswith(('linux-', 'osx-')),
win=plat.startswith('win-'),
win32=bool(plat == 'win-32'),
win64=bool(plat == 'win-64'),
x86=plat.endswith(('-32', '-64')),
x86_64=plat.endswith('-64'),
os=os,
environ=os.environ,
nomkl=bool(int(os.environ.get('FEATURE_NOMKL', False)))
)
defaults = variants.get_default_variant(config)
py = config.variant.get('python', defaults['python'])
# there are times when python comes in as a tuple
if not hasattr(py, 'split'):
py = py[0]
py = int("".join(py.split('.')[:2]))
d.update(dict(py=py,
py3k=bool(30 <= py < 40),
py2k=bool(20 <= py < 30),
py26=bool(py == 26),
py27=bool(py == 27),
py33=bool(py == 33),
py34=bool(py == 34),
py35=bool(py == 35),
py36=bool(py == 36),))
np = config.variant.get('numpy')
if not np:
np = defaults['numpy']
if config.verbose:
utils.get_logger(__name__).warn("No numpy version specified in conda_build_config.yaml. "
"Falling back to default numpy value of {}".format(defaults['numpy']))
d['np'] = int("".join(np.split('.')[:2]))
pl = config.variant.get('perl', defaults['perl'])
d['pl'] = pl
lua = config.variant.get('lua', defaults['lua'])
d['lua'] = lua
d['luajit'] = bool(lua[0] == "2")
for machine in non_x86_linux_machines:
d[machine] = bool(plat == 'linux-%s' % machine)
for feature, value in feature_list:
d[feature] = value
d.update(os.environ)
# here we try to do some type conversion for more intuitive usage. Otherwise,
# values like 35 are strings by default, making relational operations confusing.
# We also convert "True" and things like that to booleans.
for k, v in config.variant.items():
if k not in d:
try:
d[k] = int(v)
except (TypeError, ValueError):
if isinstance(v, string_types) and v.lower() in ('false', 'true'):
v = v.lower() == 'true'
d[k] = v
return d
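# Hedged illustration (not part of the original module; the _example_* name is
# hypothetical): a stand-alone sketch of the variant value coercion performed
# above, so strings such as "35" or "true" compare intuitively in selectors.
def _example_variant_coercion(variant):
    coerced = {}
    for key, val in variant.items():
        try:
            coerced[key] = int(val)
        except (TypeError, ValueError):
            if isinstance(val, string_types) and val.lower() in ('false', 'true'):
                val = val.lower() == 'true'
            coerced[key] = val
    return coerced
# e.g. _example_variant_coercion({'python': '35', 'with_ssl': 'true'})
#      -> {'python': 35, 'with_ssl': True}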
# Selectors must be either:
# - at end of the line
# - embedded (anywhere) within a comment
#
# Notes:
# - [([^\[\]]+)\] means "find a pair of brackets containing any
# NON-bracket chars, and capture the contents"
# - (?(2)[^\(\)]*)$ means "allow trailing characters iff group 2 (#.*) was found."
# Skip markdown link syntax.
sel_pat = re.compile(r'(.+?)\s*(#.*)?\[([^\[\]]+)\](?(2)[^\(\)]*)$')
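# Hedged example (not part of the original module): how sel_pat splits a recipe
# line into content, optional comment, and the bracketed selector expression.
def _example_sel_pat():
    m = sel_pat.match("    - pywin32  # [win]")
    # m.group(1) -> "    - pywin32", m.group(3) -> "win"
    return m.group(1), m.group(3)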
# This function extracts the variable name from a NameError exception, which has
# the form "NameError: name 'var' is not defined", where 'var' is the undefined
# variable. The extracted name is returned.
def parseNameNotFound(error):
m = re.search('\'(.+?)\'', str(error))
if len(m.groups()) == 1:
return m.group(1)
else:
return ""
# We evaluate the selector and return True (keep this line) or False (drop this line).
# If we encounter a NameError (an unknown variable in the selector), we replace that
# variable with False and re-run the evaluation.
def eval_selector(selector_string, namespace, variants_in_place):
try:
# TODO: is there a way to do this without eval? Eval allows arbitrary
# code execution.
return eval(selector_string, namespace, {})
except NameError as e:
missing_var = parseNameNotFound(e)
if variants_in_place:
log = utils.get_logger(__name__)
log.debug("Treating unknown selector \'" + missing_var +
"\' as if it was False.")
next_string = selector_string.replace(missing_var, "False")
return eval_selector(next_string, namespace, variants_in_place)
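# Hedged example (not part of the original module): an unknown name inside a
# selector is treated as False instead of raising.
def _example_eval_selector():
    # 'win' is defined in the namespace; 'linux' is not, so it evaluates as
    # False and the whole expression comes back True.
    return eval_selector("win and not linux", {'win': True}, variants_in_place=True)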
def select_lines(data, namespace, variants_in_place):
lines = []
for i, line in enumerate(data.splitlines()):
line = line.rstrip()
trailing_quote = ""
if line and line[-1] in ("'", '"'):
trailing_quote = line[-1]
if line.lstrip().startswith('#'):
# Don't bother with comment only lines
continue
m = sel_pat.match(line)
if m:
cond = m.group(3)
try:
if eval_selector(cond, namespace, variants_in_place):
lines.append(m.group(1) + trailing_quote)
except Exception as e:
sys.exit('''\
Error: Invalid selector in meta.yaml line %d:
offending line:
%s
exception:
%s
''' % (i + 1, line, str(e)))
else:
lines.append(line)
return '\n'.join(lines) + '\n'
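# Hedged example (not part of the original module; the requirements snippet is
# illustrative): lines whose selector evaluates to False are dropped, and the
# selector comment is stripped from the lines that are kept.
def _example_select_lines():
    data = ("requirements:\n"
            "  - m2-patch  # [win]\n"
            "  - patch  # [not win]\n")
    # with win=False this returns "requirements:\n  - patch\n"
    return select_lines(data, {'win': False}, variants_in_place=True)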
def yamlize(data):
try:
with stringify_numbers():
loaded_data = yaml.load(data, Loader=loader)
return loaded_data
except yaml.error.YAMLError as e:
if '{{' in data:
try:
import jinja2
jinja2 # Avoid pyflakes failure: 'jinja2' imported but unused
except ImportError:
raise exceptions.UnableToParseMissingJinja2(original=e)
print("Problematic recipe:", file=sys.stderr)
print(data, file=sys.stderr)
raise exceptions.UnableToParse(original=e)
def ensure_valid_fields(meta):
pin_depends = meta.get('build', {}).get('pin_depends', '')
if pin_depends and pin_depends not in ('', 'record', 'strict'):
raise RuntimeError("build/pin_depends must be 'record' or 'strict' - "
"not '%s'" % pin_depends)
def _trim_None_strings(meta_dict):
log = utils.get_logger(__name__)
for key, value in meta_dict.items():
if hasattr(value, 'keys'):
meta_dict[key] = _trim_None_strings(value)
elif value and hasattr(value, '__iter__') or isinstance(value, string_types):
if isinstance(value, string_types):
meta_dict[key] = None if 'None' in value else value
else:
# support lists of dicts (homogeneous)
keep = []
if hasattr(next(iter(value)), 'keys'):
for d in value:
trimmed_dict = _trim_None_strings(d)
if trimmed_dict:
keep.append(trimmed_dict)
# support lists of strings (homogeneous)
else:
keep = [i for i in value if i not in ('None', 'NoneType')]
meta_dict[key] = keep
else:
log.debug("found unrecognized data type in dictionary: {0}, type: {1}".format(value,
type(value)))
return meta_dict
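# Hedged example (not part of the original module): 'None' strings are dropped
# from lists and replaced with None in string-valued fields.
def _example_trim_none_strings():
    meta = {'build': {'script': ['None', 'echo hello'], 'string': 'None'}}
    # -> {'build': {'script': ['echo hello'], 'string': None}}
    return _trim_None_strings(meta)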
def ensure_valid_noarch_value(meta):
build_noarch = meta.get('build', {}).get('noarch')
if build_noarch and build_noarch not in NOARCH_TYPES:
raise exceptions.CondaBuildException("Invalid value for noarch: %s" % build_noarch)
def _get_all_dependencies(metadata, envs=('host', 'build', 'run')):
reqs = []
for _env in envs:
reqs.extend(metadata.meta.get('requirements', {}).get(_env, []))
return reqs
def check_circular_dependencies(render_order):
pairs = []
for idx, m in enumerate(render_order.values()):
for other_m in list(render_order.values())[idx + 1:]:
if (any(m.name() == dep or dep.startswith(m.name() + ' ')
for dep in _get_all_dependencies(other_m)) and
any(other_m.name() == dep or dep.startswith(other_m.name() + ' ')
for dep in _get_all_dependencies(m))):
pairs.append((m.name(), other_m.name()))
if pairs:
error = "Circular dependencies in recipe: \n"
for pair in pairs:
error += " {0} <-> {1}\n".format(*pair)
raise exceptions.RecipeError(error)
def _variants_equal(metadata, output_metadata):
match = True
for key, val in metadata.config.variant.items():
if key in output_metadata.config.variant and val != output_metadata.config.variant[key]:
match = False
return match
def ensure_matching_hashes(output_metadata):
envs = 'build', 'host', 'run'
problemos = []
for (_, m) in output_metadata.values():
for (_, om) in output_metadata.values():
if m != om:
run_exports = om.meta.get('build', {}).get('run_exports', [])
if hasattr(run_exports, 'keys'):
run_exports = run_exports.get('strong', []) + run_exports.get('weak', [])
deps = _get_all_dependencies(om, envs) + run_exports
for dep in deps:
if (dep.startswith(m.name() + ' ') and len(dep.split(' ')) == 3 and
dep.split(' ')[-1] != m.build_id() and _variants_equal(m, om)):
problemos.append((m.name(), m.build_id(), dep, om.name()))
if problemos:
error = ""
for prob in problemos:
error += "Mismatching package: {} (id {}); dep: {}; consumer package: {}\n".format(*prob)
raise exceptions.RecipeError("Mismatching hashes in recipe. Exact pins in dependencies "
"that contribute to the hash often cause this. Can you "
"change one or more exact pins to version bound constraints?\n"
"Involved packages were:\n" + error)
def parse(data, config, path=None):
data = select_lines(data, ns_cfg(config), variants_in_place=bool(config.variant))
res = yamlize(data)
# ensure the result is a dict
if res is None:
res = {}
for field in FIELDS:
if field not in res:
continue
# ensure that empty fields are dicts (otherwise selectors can cause invalid fields)
if not res[field]:
res[field] = {}
# source field may be either a dictionary, or a list of dictionaries
if field in OPTIONALLY_ITERABLE_FIELDS:
if not (isinstance(res[field], dict) or (hasattr(res[field], '__iter__') and not
isinstance(res[field], string_types))):
raise RuntimeError("The %s field should be a dict or list of dicts, not "
"%s in file %s." % (field, res[field].__class__.__name__, path))
else:
if not isinstance(res[field], dict):
raise RuntimeError("The %s field should be a dict, not %s in file %s." %
(field, res[field].__class__.__name__, path))
ensure_valid_fields(res)
ensure_valid_license_family(res)
ensure_valid_noarch_value(res)
return sanitize(res)
trues = {'y', 'on', 'true', 'yes'}
falses = {'n', 'no', 'false', 'off'}
default_structs = {
'build/entry_points': list,
'build/features': list,
'source/patches': list,
'build/script': list,
'build/script_env': list,
'build/run_exports': list,
'build/track_features': list,
'build/osx_is_app': bool,
'build/preserve_egg_dir': bool,
'build/binary_relocation': bool,
'build/noarch': text_type,
'build/noarch_python': bool,
'build/detect_binary_files_with_prefix': bool,
'build/skip': bool,
'build/skip_compile_pyc': list,
'build/preferred_env': text_type,
'build/preferred_env_executable_paths': list,
'build/ignore_run_exports': list,
'build/requires_features': dict,
'build/provides_features': dict,
'build/pre-link': text_type,
'build/post-link': text_type,
'build/pre-unlink': text_type,
'build/string': text_type,
'build/pin_depends': text_type,
'build/force_use_keys': list,
'build/force_ignore_keys': list,
'build/merge_build_host': bool,
'build/msvc_compiler': text_type,
'requirements/build': list,
'requirements/host': list,
'requirements/run': list,
'requirements/conflicts': list,
'requirements/run_constrained': list,
'test/requires': list,
'test/files': list,
'test/source_files': list,
'test/commands': list,
'test/imports': list,
'test/downstreams': list,
'package/version': text_type,
'source/svn_rev': text_type,
'source/git_tag': text_type,
'source/git_branch': text_type,
'source/md5': text_type,
'source/git_rev': text_type,
'source/path': text_type,
'source/git_url': text_type,
'app/own_environment': bool,
'about/identifiers': list,
'about/keywords': list,
'about/tags': list,
}
def sanitize(meta):
"""
Sanitize the meta-data to remove aliases/handle deprecation
"""
sanitize_funs = {'source': [_git_clean], 'package': [_str_version], 'build': [_str_version]}
for section, funs in sanitize_funs.items():
if section in meta:
for func in funs:
section_data = meta[section]
# section is a dictionary
if hasattr(section_data, 'keys'):
section_data = func(section_data)
# section is a list of dictionaries
else:
section_data = [func(_d) for _d in section_data]
meta[section] = section_data
return meta
def _git_clean(source_meta):
"""
Reduce the redundancy in git specification by removing git_tag and
git_branch.
If one is specified, copy to git_rev.
    If more than one field is used to specify the revision, exit
and complain.
"""
git_rev_tags_old = ('git_branch', 'git_tag')
git_rev = 'git_rev'
git_rev_tags = (git_rev,) + git_rev_tags_old
has_rev_tags = tuple(bool(source_meta.get(tag, text_type())) for
tag in git_rev_tags)
if sum(has_rev_tags) > 1:
msg = "Error: multiple git_revs:"
msg += ', '.join("{}".format(key) for key, has in
zip(git_rev_tags, has_rev_tags) if has)
sys.exit(msg)
# make a copy of the input so we have no side-effects
ret_meta = source_meta.copy()
# loop over the old versions
for key, has in zip(git_rev_tags[1:], has_rev_tags[1:]):
# update if needed
if has:
ret_meta[git_rev_tags[0]] = ret_meta[key]
# and remove
ret_meta.pop(key, None)
return ret_meta
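# Hedged example (not part of the original module; the URL is a placeholder):
# git_tag/git_branch collapse into git_rev.
def _example_git_clean():
    # -> {'git_url': 'https://example.invalid/repo.git', 'git_rev': 'v1.0'}
    return _git_clean({'git_url': 'https://example.invalid/repo.git',
                       'git_tag': 'v1.0'})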
def _str_version(package_meta):
if 'version' in package_meta:
package_meta['version'] = str(package_meta.get('version', ''))
if 'msvc_compiler' in package_meta:
package_meta['msvc_compiler'] = str(package_meta.get('msvc_compiler', ''))
return package_meta
# If you update this please update the example in
# conda-docs/docs/source/build.rst
FIELDS = {
'package': {'name', 'version'},
'source': {'fn', 'url', 'md5', 'sha1', 'sha256', 'path', 'path_via_symlink',
'git_url', 'git_tag', 'git_branch', 'git_rev', 'git_depth',
'hg_url', 'hg_tag',
'svn_url', 'svn_rev', 'svn_ignore_externals',
'folder', 'no_hoist',
'patches',
},
'build': {'number', 'string', 'entry_points', 'osx_is_app', 'disable_pip',
'features', 'track_features', 'preserve_egg_dir',
'no_link', 'binary_relocation', 'script', 'noarch', 'noarch_python',
'has_prefix_files', 'binary_has_prefix_files', 'ignore_prefix_files',
'detect_binary_files_with_prefix', 'skip_compile_pyc', 'rpaths',
'rpaths_patcher', 'script_env', 'always_include_files', 'skip',
'msvc_compiler', 'pin_depends', 'include_recipe', # pin_depends is experimental still
'preferred_env', 'preferred_env_executable_paths', 'run_exports',
'ignore_run_exports', 'requires_features', 'provides_features',
'force_use_keys', 'force_ignore_keys', 'merge_build_host',
'pre-link', 'post-link', 'pre-unlink', 'missing_dso_whitelist',
'error-overdepending', 'error-overlinking',
},
'outputs': {'name', 'version', 'number', 'script', 'script_interpreter', 'build',
'requirements', 'test', 'about', 'extra', 'files', 'type', 'run_exports'},
'requirements': {'build', 'host', 'run', 'conflicts', 'run_constrained'},
'app': {'entry', 'icon', 'summary', 'type', 'cli_opts',
'own_environment'},
'test': {'requires', 'commands', 'files', 'imports', 'source_files', 'downstreams'},
'about': {'home', 'dev_url', 'doc_url', 'doc_source_url', 'license_url', # these are URLs
'license', 'summary', 'description', 'license_family', # text
'identifiers', 'tags', 'keywords', # lists
'license_file', 'readme', # paths in source tree
},
}
# Fields that may either be a dictionary or a list of dictionaries.
OPTIONALLY_ITERABLE_FIELDS = ('source', 'outputs')
def check_bad_chrs(s, field):
bad_chrs = '=@#$%^&*:;"\'\\|<>?/ '
if field in ('package/version', 'build/string'):
bad_chrs += '-'
if field != 'package/version':
bad_chrs += '!'
for c in bad_chrs:
if c in s:
sys.exit("Error: bad character '%s' in %s: %s" % (c, field, s))
def get_package_version_pin(build_reqs, name):
version = ""
for spec in build_reqs:
if spec.split()[0] == name and len(spec.split()) > 1:
version = spec.split()[1]
return version
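# Hedged example (not part of the original module): only the version component
# of a matching "name version [build]" spec is returned.
def _example_get_package_version_pin():
    # -> '1.16'
    return get_package_version_pin(['python 3.7', 'numpy 1.16 *_0', 'pip'], 'numpy')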
def build_string_from_metadata(metadata):
if metadata.meta.get('build', {}).get('string'):
build_str = metadata.get_value('build/string')
else:
res = []
build_or_host = 'host' if metadata.is_cross else 'build'
build_pkg_names = [ms.name for ms in metadata.ms_depends(build_or_host)]
build_deps = metadata.meta.get('requirements', {}).get(build_or_host, [])
# TODO: this is the bit that puts in strings like py27np111 in the filename. It would be
        # nice to get rid of this, since the hash supersedes that functionally, but not clear
# whether anyone's tools depend on this file naming right now.
for s, names, places in (('np', 'numpy', 2), ('py', 'python', 2), ('pl', 'perl', 2),
('lua', 'lua', 2), ('r', ('r', 'r-base'), 2),
('mro', 'mro-base', 3), ('mro', 'mro-base_impl', 3)):
for ms in metadata.ms_depends('run'):
for name in ensure_list(names):
if ms.name == name and name in build_pkg_names:
# only append numpy when it is actually pinned
if name == 'numpy' and not metadata.numpy_xx:
continue
if metadata.noarch == name or (metadata.get_value('build/noarch_python') and
name == 'python'):
res.append(s)
else:
pkg_names = list(ensure_list(names))
pkg_names.extend([_n.replace('-', '_')
for _n in ensure_list(names) if '-' in _n])
for _n in pkg_names:
variant_version = (get_package_version_pin(build_deps, _n) or
metadata.config.variant.get(_n.replace('-', '_'),
''))
if variant_version:
break
entry = ''.join([s] + variant_version.split('.')[:places])
if entry not in res:
res.append(entry)
features = ensure_list(metadata.get_value('build/features', []))
if res:
res.append('_')
if features:
res.extend(('_'.join(features), '_'))
res.append('{0}'.format(metadata.build_number() if metadata.build_number() else 0))
build_str = "".join(res)
return build_str
# This really belongs in conda, and it is in conda.cli.common,
# but we don't presently have an API there.
def _get_env_path(env_name_or_path):
if not os.path.isdir(env_name_or_path):
for envs_dir in list(envs_dirs) + [os.getcwd()]:
path = os.path.join(envs_dir, env_name_or_path)
if os.path.isdir(path):
env_name_or_path = path
break
bootstrap_metadir = os.path.join(env_name_or_path, 'conda-meta')
if not os.path.isdir(bootstrap_metadir):
print("Bootstrap environment '%s' not found" % env_name_or_path)
sys.exit(1)
return env_name_or_path
def _get_dependencies_from_environment(env_name_or_path):
path = _get_env_path(env_name_or_path)
# construct build requirements that replicate the given bootstrap environment
# and concatenate them to the build requirements from the recipe
bootstrap_metadata = get_installed_packages(path)
bootstrap_requirements = []
for package, data in bootstrap_metadata.items():
bootstrap_requirements.append("%s %s %s" % (package, data['version'], data['build']))
return {'requirements': {'build': bootstrap_requirements}}
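# Hedged illustration (not part of the original module; the specs shown are
# made up): the returned structure mirrors a recipe's requirements/build
# section, one exact "name version build" spec per installed package, e.g.
#   {'requirements': {'build': ['python 3.7.3 h0371630_0', 'pip 19.1 py37_0']}}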
def toposort(output_metadata_map):
'''This function is used to work out the order to run the install scripts
for split packages based on any interdependencies. The result is just
a re-ordering of outputs such that we can run them in that order and
reset the initial set of files in the install prefix after each. This
will naturally lead to non-overlapping files in each package and also
the correct files being present during the install and test procedures,
provided they are run in this order.'''
from .conda_interface import _toposort
# We only care about the conda packages built by this recipe. Non-conda
# packages get sorted to the end.
these_packages = [output_d['name'] for output_d in output_metadata_map
if output_d.get('type', 'conda').startswith('conda')]
topodict = dict()
order = dict()
endorder = set()
for idx, (output_d, output_m) in enumerate(output_metadata_map.items()):
if output_d.get('type', 'conda').startswith('conda'):
deps = (output_m.get_value('requirements/run', []) +
output_m.get_value('requirements/host', []))
if not output_m.is_cross:
deps.extend(output_m.get_value('requirements/build', []))
name = output_d['name']
order[name] = idx
topodict[name] = set()
for dep in deps:
dep = dep.split(' ')[0]
if dep in these_packages:
topodict[name].update((dep,))
else:
endorder.add(idx)
topo_order = list(_toposort(topodict))
keys = [k for pkgname in topo_order for k in output_metadata_map.keys()
if 'name' in k and k['name'] == pkgname]
# not sure that this is working... not everything has 'name', and not sure how this pans out
# may end up excluding packages without the 'name' field
keys.extend([k for pkgname in endorder for k in output_metadata_map.keys()
if ('name' in k and k['name'] == pkgname) or 'name' not in k])
result = OrderedDict()
for key in keys:
result[key] = output_metadata_map[key]
return result
def get_output_dicts_from_metadata(metadata, outputs=None):
outputs = outputs or metadata.get_section('outputs')
if not outputs:
outputs = [{'name': metadata.name()}]
else:
assert not hasattr(outputs, 'keys'), ('outputs specified as dictionary, but must be a '
'list of dictionaries. YAML syntax is: \n\n'
'outputs:\n - name: subpkg\n\n'
'(note the - before the inner dictionary)')
# make a metapackage for the top-level package if the top-level requirements
# mention a subpackage,
# but only if a matching output name is not explicitly provided
if metadata.uses_subpackage and not any(metadata.name() == out.get('name', '')
for out in outputs):
outputs.append(OrderedDict(name=metadata.name()))
for out in outputs:
if 'package:' in metadata.get_recipe_text() and out.get('name') == metadata.name():
combine_top_level_metadata_with_output(metadata, out)
return outputs
def finalize_outputs_pass(base_metadata, render_order, pass_no, outputs=None,
permit_unsatisfiable_variants=False, bypass_env_check=False):
from .render import finalize_metadata
outputs = OrderedDict()
# each of these outputs can have a different set of dependency versions from each other,
# but also from base_metadata
for output_d, metadata in render_order.values():
if metadata.skip():
continue
try:
log = utils.get_logger(__name__)
# We should reparse the top-level recipe to get all of our dependencies fixed up.
# we base things on base_metadata because it has the record of the full origin recipe
if base_metadata.config.verbose:
log.info("Attempting to finalize metadata for {}".format(metadata.name()))
# Using base_metadata is important for keeping the reference to the parent recipe
om = base_metadata.copy()
# other_outputs is the context of what's available for
# pin_subpackage. It's stored on the metadata object here, but not
# on base_metadata, which om is a copy of. Before we do
# re-rendering of om's metadata, we need to have other_outputs in
# place, so it can refer to it for any pin_subpackage stuff it has.
om.other_outputs = metadata.other_outputs
om.config.variant = metadata.config.variant
parent_metadata = om.copy()
om.other_outputs.update(outputs)
om.final = False
# get the new output_d from the reparsed top-level metadata, so that we have any
# exact subpackage version/build string info
output_d = om.get_rendered_output(metadata.name()) or {'name': metadata.name()}
om = om.get_output_metadata(output_d)
parent_metadata.parse_until_resolved()
if not bypass_env_check:
fm = finalize_metadata(om, parent_metadata=parent_metadata,
permit_unsatisfiable_variants=permit_unsatisfiable_variants)
else:
fm = om
if not output_d.get('type') or output_d.get('type').startswith('conda'):
outputs[(fm.name(), HashableDict({k: fm.config.variant[k]
for k in fm.get_used_vars()}))] = (output_d, fm)
except exceptions.DependencyNeedsBuildingError as e:
if not permit_unsatisfiable_variants:
raise
else:
log = utils.get_logger(__name__)
log.warn("Could not finalize metadata due to missing dependencies: "
"{}".format(e.packages))
outputs[(metadata.name(), HashableDict({k: metadata.config.variant[k]
for k in metadata.get_used_vars()}))] = (
output_d, metadata)
# in-place modification
base_metadata.other_outputs = outputs
base_metadata.final = False
final_outputs = OrderedDict()
for k, (out_d, m) in outputs.items():
final_outputs[(m.name(), HashableDict({k: m.config.variant[k]
for k in m.get_used_vars()}))] = out_d, m
return final_outputs
def get_updated_output_dict_from_reparsed_metadata(original_dict, new_outputs):
output_d = original_dict
if 'name' in original_dict:
output_ds = [out for out in new_outputs if 'name' in out and
out['name'] == original_dict['name']]
assert len(output_ds) == 1
output_d = output_ds[0]
return output_d
def _filter_recipe_text(text, extract_pattern=None):
if extract_pattern:
match = re.search(extract_pattern, text, flags=re.MULTILINE | re.DOTALL)
text = "\n".join(set(string for string in match.groups() if string)) if match else ""
return text
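# Hedged example (not part of the original module): with an extract pattern,
# only the captured groups survive.
def _example_filter_recipe_text():
    text = "package:\n  name: foo\nbuild:\n  number: 0\n"
    # -> "package:\n  name: foo\n"
    return _filter_recipe_text(text, extract_pattern=r'(^package:.*?)(?=^build:|\Z)')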
@memoized
def read_meta_file(meta_path):
with open(meta_path, 'rb') as f:
recipe_text = UnicodeDammit(f.read()).unicode_markup
if PY3 and hasattr(recipe_text, 'decode'):
recipe_text = recipe_text.decode()
return recipe_text
def combine_top_level_metadata_with_output(metadata, output):
"""Merge top-level metadata into output when output is same name as top-level"""
sections = ('requirements', 'build', 'about')
for section in sections:
metadata_section = metadata.meta.get(section, {}) or {}
output_section = output.get(section, {}) or {}
if section == 'requirements':
output_section = utils.expand_reqs(output.get(section, {}))
for k, v in metadata_section.items():
if k not in output_section:
output_section[k] = v
output[section] = output_section
# synchronize them
metadata.meta[section] = output_section
def trim_build_only_deps(metadata, requirements_used):
'''things can be used as dependencies or elsewhere in the recipe. If it's only used
elsewhere, keep it. If it's a dep-related thing, only keep it if
it's in the build deps.'''
# filter out things that occur only in run requirements. These don't actually affect the
# outcome of the package.
output_reqs = utils.expand_reqs(metadata.meta.get('requirements', {}))
build_reqs = utils.ensure_list(output_reqs.get('build', []))
host_reqs = utils.ensure_list(output_reqs.get('host', []))
run_reqs = output_reqs.get('run', [])
build_reqs = {req.split()[0].replace('-', '_') for req in build_reqs if req}
host_reqs = {req.split()[0].replace('-', '_') for req in host_reqs if req}
to_remove = set()
ignore_build_only_deps = utils.ensure_list(metadata.config.variant.get('ignore_build_only_deps', []))
for dep in requirements_used:
# filter out stuff that's only in run deps
if dep in run_reqs:
if (dep not in build_reqs and
dep not in host_reqs and
dep in requirements_used):
to_remove.add(dep)
else:
if (dep in build_reqs and
dep not in host_reqs and
dep in requirements_used and
dep in ignore_build_only_deps):
to_remove.add(dep)
return requirements_used - to_remove
@contextlib.contextmanager
def stringify_numbers():
# ensure that numbers are not interpreted as ints or floats. That trips up versions
# with trailing zeros.
implicit_resolver_backup = loader.yaml_implicit_resolvers.copy()
for ch in list(u'0123456789'):
if ch in loader.yaml_implicit_resolvers:
del loader.yaml_implicit_resolvers[ch]
yield
for ch in list(u'0123456789'):
if ch in implicit_resolver_backup:
loader.yaml_implicit_resolvers[ch] = implicit_resolver_backup[ch]
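# Hedged example (not part of the original module): with the digit resolvers
# removed, YAML scalars that start with a digit load as strings, preserving
# trailing zeros in version numbers.
def _example_stringify_numbers():
    with stringify_numbers():
        # -> {'version': '1.10'} rather than {'version': 1.1}
        return yaml.load("version: 1.10", Loader=loader)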
class MetaData(object):
def __init__(self, path, config=None, variant=None):
self.undefined_jinja_vars = []
self.config = get_or_merge_config(config, variant=variant)
if isfile(path):
self._meta_path = path
self._meta_name = os.path.basename(path)
self.path = os.path.dirname(path)
else:
self._meta_path = find_recipe(path)
self._meta_name = 'meta.yaml'
self.path = os.path.dirname(self.meta_path)
self.requirements_path = join(self.path, 'requirements.txt')
# Start with bare-minimum contents so we can call environ.get_dict() with impunity
# We'll immediately replace these contents in parse_again()
self.meta = dict()
# This is the 'first pass' parse of meta.yaml, so not all variables are defined yet
# (e.g. GIT_FULL_HASH, etc. are undefined)
# Therefore, undefined jinja variables are permitted here
# In the second pass, we'll be more strict. See build.build()
# Primarily for debugging. Ensure that metadata is not altered after "finalizing"
self.parse_again(permit_undefined_jinja=True, allow_no_other_outputs=True)
self.config.disable_pip = self.disable_pip
# establish whether this recipe should squish build and host together
@property
def is_cross(self):
return (bool(self.get_depends_top_and_out('host')) or
'host' in self.meta.get('requirements', {}))
@property
def final(self):
return self.get_value('extra/final')
@final.setter
def final(self, boolean):
extra = self.meta.get('extra', {})
extra['final'] = boolean
self.meta['extra'] = extra
@property
def disable_pip(self):
return self.config.disable_pip or ('build' in self.meta and
'disable_pip' in self.meta['build'])
@disable_pip.setter
def disable_pip(self, value):
self.config.disable_pip = value
build = self.meta.get('build', {})
build['disable_pip'] = value
self.meta['build'] = build
def append_metadata_sections(self, sections_file_or_dict, merge, raise_on_clobber=False):
"""Append to or replace subsections to meta.yaml
This is used to alter input recipes, so that a given requirement or
setting is applied without manually altering the input recipe. It is
intended for vendors who want to extend existing recipes without
necessarily removing information. pass merge=False to replace sections.
"""
if hasattr(sections_file_or_dict, 'keys'):
build_config = sections_file_or_dict
else:
with open(sections_file_or_dict) as configfile:
build_config = parse(configfile.read(), config=self.config)
utils.merge_or_update_dict(self.meta, build_config, self.path, merge=merge,
raise_on_clobber=raise_on_clobber)
@property
def is_output(self):
self_name = self.name(fail_ok=True)
parent_name = self.meta.get('extra', {}).get('parent_recipe', {}).get('name')
return bool(parent_name) and parent_name != self_name
def parse_again(self, permit_undefined_jinja=False, allow_no_other_outputs=False,
bypass_env_check=False, **kw):
"""Redo parsing for key-value pairs that are not initialized in the
first pass.
config: a conda-build Config object. If None, the config object passed at creation
time is used.
permit_undefined_jinja: If True, *any* use of undefined jinja variables will
            evaluate to an empty string, without emitting an error.
"""
assert not self.final, "modifying metadata after finalization"
log = utils.get_logger(__name__)
if kw:
log.warn("using unsupported internal conda-build function `parse_again`. Please use "
"conda_build.api.render instead.")
append_sections_file = None
clobber_sections_file = None
# we sometimes create metadata from dictionaries, in which case we'll have no path
if self.meta_path:
self.meta = parse(self._get_contents(permit_undefined_jinja,
allow_no_other_outputs=allow_no_other_outputs,
bypass_env_check=bypass_env_check),
config=self.config,
path=self.meta_path)
append_sections_file = os.path.join(self.path, 'recipe_append.yaml')
clobber_sections_file = os.path.join(self.path, 'recipe_clobber.yaml')
append_sections_file = self.config.append_sections_file or append_sections_file
if append_sections_file and not os.path.isfile(append_sections_file):
log.debug('input append sections file did not exist: %s', append_sections_file)
append_sections_file = None
clobber_sections_file = self.config.clobber_sections_file or clobber_sections_file
if clobber_sections_file and not os.path.isfile(clobber_sections_file):
log.debug('input clobber sections file did not exist: %s', clobber_sections_file)
clobber_sections_file = None
if append_sections_file:
self.append_metadata_sections(append_sections_file, merge=True)
if clobber_sections_file:
self.append_metadata_sections(clobber_sections_file, merge=False)
if self.config.bootstrap:
dependencies = _get_dependencies_from_environment(self.config.bootstrap)
self.append_metadata_sections(dependencies, merge=True)
if self.meta.get('build', {}).get('error_overlinking', False):
self.config.error_overlinking = self.meta['build']['error_overlinking']
if self.meta.get('build', {}).get('error_overdepending', False):
self.config.error_overdepending = self.meta['build']['error_overdepending']
self.validate_features()
self.ensure_no_pip_requirements()
def ensure_no_pip_requirements(self):
keys = 'requirements/build', 'requirements/run', 'test/requires'
for key in keys:
if any(hasattr(item, 'keys') for item in (self.get_value(key) or [])):
raise ValueError("Dictionaries are not supported as values in requirements sections"
". Note that pip requirements as used in conda-env "
"environment.yml files are not supported by conda-build.")
def append_requirements(self):
"""For dynamic determination of build or run reqs, based on configuration"""
reqs = self.meta.get('requirements', {})
run_reqs = reqs.get('run', [])
if bool(self.get_value('build/osx_is_app', False)) and self.config.platform == 'osx':
if 'python.app' not in run_reqs:
run_reqs.append('python.app')
self.meta['requirements'] = reqs
def parse_until_resolved(self, allow_no_other_outputs=False, bypass_env_check=False):
"""variant contains key-value mapping for additional functions and values
for jinja2 variables"""
# undefined_jinja_vars is refreshed by self.parse again
undefined_jinja_vars = ()
# store the "final" state that we think we're in. reloading the meta.yaml file
# can reset it (to True)
final = self.final
# always parse again at least once.
self.parse_again(permit_undefined_jinja=True, allow_no_other_outputs=allow_no_other_outputs,
bypass_env_check=bypass_env_check)
self.final = final
while set(undefined_jinja_vars) != set(self.undefined_jinja_vars):
undefined_jinja_vars = self.undefined_jinja_vars
self.parse_again(permit_undefined_jinja=True,
allow_no_other_outputs=allow_no_other_outputs,
bypass_env_check=bypass_env_check)
self.final = final
if undefined_jinja_vars:
self.parse_again(permit_undefined_jinja=False,
allow_no_other_outputs=allow_no_other_outputs,
bypass_env_check=bypass_env_check)
sys.exit("Undefined Jinja2 variables remain ({}). Please enable "
"source downloading and try again.".format(self.undefined_jinja_vars))
# always parse again at the end, too.
self.parse_again(permit_undefined_jinja=False,
allow_no_other_outputs=allow_no_other_outputs,
bypass_env_check=bypass_env_check)
self.final = final
@classmethod
def fromstring(cls, metadata, config=None, variant=None):
m = super(MetaData, cls).__new__(cls)
if not config:
config = Config()
m.meta = parse(metadata, config=config, path='', variant=variant)
m.config = config
m.parse_again(permit_undefined_jinja=True)
return m
@classmethod
def fromdict(cls, metadata, config=None, variant=None):
"""
Create a MetaData object from metadata dict directly.
"""
m = super(MetaData, cls).__new__(cls)
m.path = ''
m._meta_path = ''
m.requirements_path = ''
m.meta = sanitize(metadata)
if not config:
config = Config(variant=variant)
m.config = config
m.undefined_jinja_vars = []
m.final = False
return m
def get_section(self, section):
return self.meta.get(section, {})
def get_value(self, name, default=None, autotype=True):
"""
Get a value from a meta.yaml.
        :param name: Field to return, e.g. 'package/name'.
If the section might be a list, specify an index,
e.g. 'source/0/git_url'.
:param default: Default object to return if field doesn't exist
:param autotype: If True, return the default type of field if one exists.
False will return the default object.
:return: The named value from meta.yaml
"""
names = name.split('/')
assert len(names) in (2, 3), "Bad field name: " + name
if len(names) == 2:
section, key = names
index = None
elif len(names) == 3:
section, index, key = names
assert section == 'source', "Section is not a list: " + section
index = int(index)
# get correct default
field = section + '/' + key
if autotype and default is None and field in default_structs:
default = default_structs[field]()
section_data = self.get_section(section)
if isinstance(section_data, dict):
assert not index, \
"Got non-zero index ({}), but section {} is not a list.".format(index, section)
elif isinstance(section_data, list):
# The 'source' section can be written a list, in which case the name
# is passed in with an index, e.g. get_value('source/0/git_url')
if index is None:
log = utils.get_logger(__name__)
log.warn("No index specified in get_value('{}'). Assuming index 0.".format(name))
index = 0
if len(section_data) == 0:
section_data = {}
else:
section_data = section_data[index]
assert isinstance(section_data, dict), \
"Expected {}/{} to be a dict".format(section, index)
value = section_data.get(key, default)
# handle yaml 1.1 boolean values
if isinstance(value, text_type):
if value.lower() in trues:
value = True
elif value.lower() in falses:
value = False
if value is None:
value = default
return value
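    # Hedged illustration (not part of the original class; the recipe dict and
    # URL are made up): list-valued sections take a numeric index, and missing
    # fields fall back to the default type from default_structs, e.g.
    #   m = MetaData.fromdict({'package': {'name': 'foo', 'version': '1.0'},
    #                          'source': [{'git_url': 'https://example.invalid/r.git'}]})
    #   m.get_value('source/0/git_url')  # -> 'https://example.invalid/r.git'
    #   m.get_value('build/script')      # -> [] (autotyped default)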
def check_fields(self):
def check_field(key, section):
if key not in FIELDS[section]:
raise ValueError("in section %r: unknown key %r" %
(section, key))
for section, submeta in iteritems(self.meta):
# anything goes in the extra section
if section == 'extra':
continue
if section not in FIELDS:
raise ValueError("unknown section: %s" % section)
for key_or_dict in submeta:
if section in OPTIONALLY_ITERABLE_FIELDS and isinstance(key_or_dict, dict):
for key in key_or_dict.keys():
check_field(key, section)
else:
check_field(key_or_dict, section)
return True
def name(self, fail_ok=False):
res = self.meta.get('package', {}).get('name', '')
if not res and not fail_ok:
sys.exit('Error: package/name missing in: %r' % self.meta_path)
res = text_type(res)
if res != res.lower():
sys.exit('Error: package/name must be lowercase, got: %r' % res)
check_bad_chrs(res, 'package/name')
return res
def version(self):
res = str(self.get_value('package/version'))
if res is None:
sys.exit("Error: package/version missing in: %r" % self.meta_path)
check_bad_chrs(res, 'package/version')
if self.final and res.startswith('.'):
raise ValueError("Fully-rendered version can't start with period - got %s", res)
return res
def build_number(self):
number = self.get_value('build/number')
# build number can come back as None if no setting (or jinja intermediate)
try:
build_int = int(number)
except (ValueError, TypeError):
build_int = ""
return build_int
def get_depends_top_and_out(self, typ):
meta_requirements = ensure_list(self.get_value('requirements/' + typ, []))[:]
req_names = set(req.split()[0] for req in meta_requirements if req)
extra_reqs = []
# this is for the edge case of requirements for top-level being also partially defined in a similarly named output
if not self.is_output:
matching_output = [out for out in self.meta.get('outputs', []) if
out.get('name') == self.name()]
if matching_output:
extra_reqs = utils.expand_reqs(
matching_output[0].get('requirements', [])).get(typ, [])
extra_reqs = [dep for dep in extra_reqs if dep.split()[0] not in req_names]
meta_requirements = [req for req in (set(meta_requirements) | set(extra_reqs)) if req]
return meta_requirements
def ms_depends(self, typ='run'):
names = ('python', 'numpy', 'perl', 'lua')
name_ver_list = [(name, self.config.variant[name])
for name in names
if self.config.variant.get(name)]
if self.config.variant.get('r_base'):
# r is kept for legacy installations, r-base deprecates it.
name_ver_list.extend([('r', self.config.variant['r_base']),
('r-base', self.config.variant['r_base']),
])
specs = OrderedDict()
for spec in ensure_list(self.get_value('requirements/' + typ, [])):
if not spec:
continue
try:
ms = MatchSpec(spec)
except AssertionError:
raise RuntimeError("Invalid package specification: %r" % spec)
except (AttributeError, ValueError) as e:
raise RuntimeError("Received dictionary as spec. Note that pip requirements are "
"not supported in conda-build meta.yaml. Error message: " + str(e))
if ms.name == self.name():
raise RuntimeError("%s cannot depend on itself" % self.name())
for name, ver in name_ver_list:
if ms.name == name:
if self.noarch:
continue
for c in '=!@#$%^&*:;"\'\\|<>?/':
if c in ms.name:
sys.exit("Error: bad character '%s' in package name "
"dependency '%s'" % (c, ms.name))
parts = spec.split()
if len(parts) >= 2:
if parts[1] in {'>', '>=', '=', '==', '!=', '<', '<='}:
msg = ("Error: bad character '%s' in package version "
"dependency '%s'" % (parts[1], ms.name))
if len(parts) >= 3:
msg += "\nPerhaps you meant '%s %s%s'" % (ms.name,
parts[1], parts[2])
sys.exit(msg)
specs[spec] = ms
return list(specs.values())
def get_hash_contents(self):
"""
# A hash will be added if all of these are true for any dependency:
#
# 1. package is an explicit dependency in build, host, or run deps
# 2. package has a matching entry in conda_build_config.yaml which is a pin to a specific
# version, not a lower bound
# 3. that package is not ignored by ignore_version
#
# The hash is computed based on the pinning value, NOT the build
# dependency build string. This means hashes won't change as often,
# but it also means that if run_exports is overly permissive,
# software may break more often.
#
# A hash will also ALWAYS be added when a compiler package is a build
# or host dependency. Reasoning for that is that the compiler
# package represents compiler flags and other things that can and do
# dramatically change compatibility. It is much more risky to drop
# this info (by dropping the hash) than it is for other software.
# used variables - anything with a value in conda_build_config.yaml that applies to this
# recipe. Includes compiler if compiler jinja2 function is used.
"""
dependencies = set(self.get_used_vars())
        dependencies = trim_build_only_deps(self, dependencies)
# filter out ignored versions
build_string_excludes = ['python', 'r_base', 'perl', 'lua', 'target_platform']
build_string_excludes.extend(ensure_list(self.config.variant.get('ignore_version', [])))
if 'numpy' in dependencies:
pin_compatible, not_xx = self.uses_numpy_pin_compatible_without_xx
# numpy_xx means it is accounted for in the build string, with npXYY
# if not pin_compatible, then we don't care about the usage, and omit it from the hash.
if self.numpy_xx or (pin_compatible and not not_xx):
build_string_excludes.append('numpy')
# always exclude older stuff that's always in the build string (py, np, pl, r, lua)
if build_string_excludes:
exclude_pattern = re.compile('|'.join('{}[\s$]?.*'.format(exc)
for exc in build_string_excludes))
dependencies = [req for req in dependencies if not exclude_pattern.match(req)]
# retrieve values - this dictionary is what makes up the hash.
return {key: self.config.variant[key] for key in dependencies}
def hash_dependencies(self):
"""With arbitrary pinning, we can't depend on the build string as done in
build_string_from_metadata - there's just too much info. Instead, we keep that as-is, to
not be disruptive, but we add this extra hash, which is just a way of distinguishing files
on disk. The actual determination of dependencies is done in the repository metadata.
This was revised in conda-build 3.1.0: hashing caused too many package
rebuilds. We reduce the scope to include only the pins added by conda_build_config.yaml,
and no longer hash files that contribute to the recipe.
"""
hash_ = ''
hashing_dependencies = self.get_hash_contents()
if hashing_dependencies:
hash_ = hashlib.sha1(json.dumps(hashing_dependencies, sort_keys=True).encode())
# save only the first HASH_LENGTH characters - should be more than
# enough, since these only need to be unique within one version
# plus one is for the h - zero pad on the front, trim to match HASH_LENGTH
hash_ = 'h{0}'.format(hash_.hexdigest())[:self.config.hash_length + 1]
return hash_
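    # Hedged illustration (not part of the original class; the digits shown are
    # made up): the returned string is 'h' plus the first config.hash_length hex
    # digits of the sha1 of the JSON-serialized hash contents, e.g. 'h6538f4a'
    # for hash_length 7, or '' when there is nothing to hash.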
def build_id(self):
manual_build_string = self.get_value('build/string')
# we need the raw recipe for this metadata (possibly an output), so that we can say whether
# PKG_HASH is used for anything.
raw_recipe_text = self.extract_package_and_build_text()
if not manual_build_string and not raw_recipe_text:
raise RuntimeError("Couldn't extract raw recipe text for {} output".format(self.name()))
raw_recipe_text = self.extract_package_and_build_text()
raw_manual_build_string = re.search("\s*string:", raw_recipe_text)
# user setting their own build string. Don't modify it.
if manual_build_string and not (raw_manual_build_string and
re.findall('h\{\{\s*PKG_HASH\s*\}\}', raw_manual_build_string.string)):
check_bad_chrs(manual_build_string, 'build/string')
out = manual_build_string
else:
# default; build/string not set or uses PKG_HASH variable, so we should fill in the hash
out = build_string_from_metadata(self)
if self.config.filename_hashing and self.final:
hash_ = self.hash_dependencies()
if not re.findall('h[0-9a-f]{%s}' % self.config.hash_length, out):
ret = out.rsplit('_', 1)
try:
int(ret[0])
out = '_'.join((hash_, str(ret[0]))) if hash_ else str(ret[0])
except ValueError:
out = ret[0] + hash_
if len(ret) > 1:
out = '_'.join([out] + ret[1:])
else:
out = re.sub('h[0-9a-f]{%s}' % self.config.hash_length, hash_, out)
return out
def dist(self):
return '%s-%s-%s' % (self.name(), self.version(), self.build_id())
def pkg_fn(self):
return "%s.tar.bz2" % self.dist()
def is_app(self):
return bool(self.get_value('app/entry'))
def app_meta(self):
d = {'type': 'app'}
if self.get_value('app/icon'):
d['icon'] = '%s.png' % md5_file(join(
self.path, self.get_value('app/icon')))
for field, key in [('app/entry', 'app_entry'),
('app/type', 'app_type'),
('app/cli_opts', 'app_cli_opts'),
('app/summary', 'summary'),
('app/own_environment', 'app_own_environment')]:
value = self.get_value(field)
if value:
d[key] = value
return d
def info_index(self):
arch = 'noarch' if self.config.target_subdir == 'noarch' else self.config.host_arch
d = dict(
name=self.name(),
version=self.version(),
build=self.build_id(),
build_number=self.build_number() if self.build_number() else 0,
platform=self.config.platform if (self.config.platform != 'noarch' and
arch != 'noarch') else None,
arch=ARCH_MAP.get(arch, arch),
subdir=self.config.target_subdir,
depends=sorted(' '.join(ms.spec.split())
for ms in self.ms_depends()),
timestamp=int(time.time() * 1000),
)
for key in ('license', 'license_family'):
value = self.get_value('about/' + key)
if value:
d[key] = value
preferred_env = self.get_value('build/preferred_env')
if preferred_env:
d['preferred_env'] = preferred_env
# conda 4.4+ optional dependencies
constrains = ensure_list(self.get_value('requirements/run_constrained'))
# filter None values
constrains = [v for v in constrains if v]
if constrains:
d['constrains'] = constrains
if self.get_value('build/features'):
d['features'] = ' '.join(self.get_value('build/features'))
if self.get_value('build/track_features'):
d['track_features'] = ' '.join(self.get_value('build/track_features'))
if self.get_value('build/provides_features'):
d['provides_features'] = self.get_value('build/provides_features')
if self.get_value('build/requires_features'):
d['requires_features'] = self.get_value('build/requires_features')
if self.noarch:
d['platform'] = d['arch'] = None
d['subdir'] = 'noarch'
# These are new-style noarch settings. the self.noarch setting can be True in 2 ways:
# if noarch: True or if noarch_python: True. This is disambiguation.
build_noarch = self.get_value('build/noarch')
if build_noarch:
d['noarch'] = build_noarch
if self.is_app():
d.update(self.app_meta())
return d
def has_prefix_files(self):
ret = ensure_list(self.get_value('build/has_prefix_files', []))
if not isinstance(ret, list):
raise RuntimeError('build/has_prefix_files should be a list of paths')
if sys.platform == 'win32':
if any('\\' in i for i in ret):
raise RuntimeError("build/has_prefix_files paths must use / "
"as the path delimiter on Windows")
return expand_globs(ret, self.config.host_prefix)
def ignore_prefix_files(self):
ret = self.get_value('build/ignore_prefix_files', False)
if type(ret) not in (list, bool):
raise RuntimeError('build/ignore_prefix_files should be boolean or a list of paths '
'(optionally globs)')
if sys.platform == 'win32':
if type(ret) is list and any('\\' in i for i in ret):
raise RuntimeError("build/ignore_prefix_files paths must use / "
"as the path delimiter on Windows")
return expand_globs(ret, self.config.host_prefix) if type(ret) is list else ret
def always_include_files(self):
files = ensure_list(self.get_value('build/always_include_files', []))
if any('\\' in i for i in files):
raise RuntimeError("build/always_include_files paths must use / "
"as the path delimiter on Windows")
if on_win:
files = [f.replace("/", "\\") for f in files]
return expand_globs(files, self.config.host_prefix)
def ignore_verify_codes(self):
return ensure_list(self.get_value('build/ignore_verify_codes', []))
def binary_relocation(self):
ret = self.get_value('build/binary_relocation', True)
if type(ret) not in (list, bool):
raise RuntimeError('build/binary_relocation should be boolean or a list of paths '
'(optionally globs)')
if sys.platform == 'win32':
if type(ret) is list and any('\\' in i for i in ret):
raise RuntimeError("build/binary_relocation paths must use / "
"as the path delimiter on Windows")
return expand_globs(ret, self.config.host_prefix) if type(ret) is list else ret
def include_recipe(self):
return self.get_value('build/include_recipe', True)
def binary_has_prefix_files(self):
ret = ensure_list(self.get_value('build/binary_has_prefix_files', []))
if not isinstance(ret, list):
raise RuntimeError('build/binary_has_prefix_files should be a list of paths')
if sys.platform == 'win32':
if any('\\' in i for i in ret):
raise RuntimeError("build/binary_has_prefix_files paths must use / "
"as the path delimiter on Windows")
return expand_globs(ret, self.config.host_prefix)
def skip(self):
return self.get_value('build/skip', False)
def _get_contents(self, permit_undefined_jinja, allow_no_other_outputs=False,
bypass_env_check=False, template_string=None, skip_build_id=False,
alt_name=None, variant=None):
'''
Get the contents of our [meta.yaml|conda.yaml] file.
If jinja is installed, then the template.render function is called
before standard conda macro processors.
permit_undefined_jinja: If True, *any* use of undefined jinja variables will
        evaluate to an empty string, without emitting an error.
'''
try:
import jinja2
except ImportError:
print("There was an error importing jinja2.", file=sys.stderr)
print("Please run `conda install jinja2` to enable jinja template support", file=sys.stderr) # noqa
with open(self.meta_path) as fd:
return fd.read()
from conda_build.jinja_context import context_processor, UndefinedNeverFail, FilteredLoader
path, filename = os.path.split(self.meta_path)
loaders = [ # search relative to '<conda_root>/Lib/site-packages/conda_build/templates'
jinja2.PackageLoader('conda_build'),
# search relative to RECIPE_DIR
jinja2.FileSystemLoader(path)
]
# search relative to current conda environment directory
conda_env_path = os.environ.get('CONDA_DEFAULT_ENV') # path to current conda environment
if conda_env_path and os.path.isdir(conda_env_path):
conda_env_path = os.path.abspath(conda_env_path)
conda_env_path = conda_env_path.replace('\\', '/') # need unix-style path
env_loader = jinja2.FileSystemLoader(conda_env_path)
loaders.append(jinja2.PrefixLoader({'$CONDA_DEFAULT_ENV': env_loader}))
undefined_type = jinja2.StrictUndefined
if permit_undefined_jinja:
# The UndefinedNeverFail class keeps a global list of all undefined names
# Clear any leftover names from the last parse.
UndefinedNeverFail.all_undefined_names = []
undefined_type = UndefinedNeverFail
loader = FilteredLoader(jinja2.ChoiceLoader(loaders), config=self.config)
env = jinja2.Environment(loader=loader, undefined=undefined_type)
env.globals.update(ns_cfg(self.config))
env.globals.update(environ.get_dict(m=self, skip_build_id=skip_build_id))
env.globals.update({"CONDA_BUILD_STATE": "RENDER"})
env.globals.update(context_processor(self, path, config=self.config,
permit_undefined_jinja=permit_undefined_jinja,
allow_no_other_outputs=allow_no_other_outputs,
bypass_env_check=bypass_env_check,
skip_build_id=skip_build_id, variant=variant))
# override PKG_NAME with custom value. This gets used when an output needs to pretend
# that it is top-level when getting the top-level recipe data.
if alt_name:
env.globals.update({'PKG_NAME': alt_name})
# Future goal here. Not supporting jinja2 on replaced sections right now.
# we write a temporary file, so that we can dynamically replace sections in the meta.yaml
# file on disk. These replaced sections also need to have jinja2 filling in templates.
# The really hard part here is that we need to operate on plain text, because we need to
# keep selectors and all that.
try:
if template_string:
template = env.from_string(template_string)
elif filename:
template = env.get_or_select_template(filename)
else:
template = env.from_string("")
os.environ["CONDA_BUILD_STATE"] = "RENDER"
rendered = template.render(environment=env)
if permit_undefined_jinja:
self.undefined_jinja_vars = UndefinedNeverFail.all_undefined_names
else:
self.undefined_jinja_vars = []
except jinja2.TemplateError as ex:
if "'None' has not attribute" in str(ex):
ex = "Failed to run jinja context function"
sys.exit("Error: Failed to render jinja template in {}:\n{}"
.format(self.meta_path, str(ex)))
finally:
if "CONDA_BUILD_STATE" in os.environ:
del os.environ["CONDA_BUILD_STATE"]
return rendered
def __unicode__(self):
'''
String representation of the MetaData.
'''
return text_type(self.__dict__)
def __str__(self):
if PY3:
return self.__unicode__()
else:
return self.__unicode__().encode('utf-8')
def __repr__(self):
'''
String representation of the MetaData.
'''
return self.__str__()
@property
def meta_path(self):
meta_path = self._meta_path or self.meta.get('extra', {}).get('parent_recipe', {}).get('path', '')
if meta_path and os.path.basename(meta_path) != self._meta_name:
meta_path = os.path.join(meta_path, self._meta_name)
return meta_path
@property
def uses_setup_py_in_meta(self):
meta_text = ''
if self.meta_path:
with open(self.meta_path, 'rb') as f:
meta_text = UnicodeDammit(f.read()).unicode_markup
return u"load_setup_py_data" in meta_text or u"load_setuptools" in meta_text
@property
def uses_regex_in_meta(self):
meta_text = ""
if self.meta_path:
with open(self.meta_path, 'rb') as f:
meta_text = UnicodeDammit(f.read()).unicode_markup
return "load_file_regex" in meta_text
@property
def needs_source_for_render(self):
return self.uses_vcs_in_meta or self.uses_setup_py_in_meta or self.uses_regex_in_meta
@property
def uses_jinja(self):
if not self.meta_path:
return False
with open(self.meta_path, 'rb') as f:
meta_text = UnicodeDammit(f.read()).unicode_markup
matches = re.findall(r"{{.*}}", meta_text)
return len(matches) > 0
@property
def uses_vcs_in_meta(self):
"""returns name of vcs used if recipe contains metadata associated with version control systems.
If this metadata is present, a download/copy will be forced in parse_or_try_download.
"""
vcs = None
vcs_types = ["git", "svn", "hg"]
# We would get here if we use Jinja2 templating, but specify source with path.
if self.meta_path:
with open(self.meta_path, 'rb') as f:
meta_text = UnicodeDammit(f.read()).unicode_markup
for _vcs in vcs_types:
matches = re.findall(r"{}_[^\.\s\'\"]+".format(_vcs.upper()), meta_text)
if len(matches) > 0 and _vcs != self.meta['package']['name']:
if _vcs == "hg":
_vcs = "mercurial"
vcs = _vcs
break
return vcs
@property
def uses_vcs_in_build(self):
build_script = "bld.bat" if on_win else "build.sh"
build_script = os.path.join(self.path, build_script)
for recipe_file in (build_script, self.meta_path):
if os.path.isfile(recipe_file):
vcs_types = ["git", "svn", "hg"]
                with open(recipe_file, 'rb') as f:
build_script = UnicodeDammit(f.read()).unicode_markup
for vcs in vcs_types:
# commands are assumed to have 3 parts:
# 1. the vcs command, optionally with an exe extension
# 2. a subcommand - for example, "clone"
# 3. a target url or other argument
matches = re.findall(r"{}(?:\.exe)?(?:\s+\w+\s+[\w\/\.:@]+)".format(vcs),
build_script, flags=re.IGNORECASE)
if len(matches) > 0 and vcs != self.meta['package']['name']:
if vcs == "hg":
vcs = "mercurial"
return vcs
return None
def get_recipe_text(self, extract_pattern=None, force_top_level=False, apply_selectors=True):
meta_path = self.meta_path
if meta_path:
recipe_text = read_meta_file(meta_path)
if self.is_output and not force_top_level:
recipe_text = self.extract_single_output_text(self.name(), getattr(self, 'type', None))
else:
from conda_build.render import output_yaml
recipe_text = output_yaml(self)
recipe_text = _filter_recipe_text(recipe_text, extract_pattern)
if apply_selectors:
recipe_text = select_lines(recipe_text, ns_cfg(self.config),
variants_in_place=bool(self.config.variant))
return recipe_text.rstrip()
def extract_requirements_text(self, force_top_level=False):
# outputs are already filtered into each output for us
f = r'(^\s*requirements:.*?)(?=^\s*test:|^\s*extra:|^\s*about:|^\s*-\s+name:|^outputs:|\Z)'
if 'package:' in self.get_recipe_text(force_top_level=force_top_level):
# match top-level requirements - start of line means top-level requirements
# ^requirements:.*?
# match output with similar name
# (?:-\s+name:\s+%s.*?)requirements:.*?
# terminate match of other sections
# (?=^\s*-\sname|^\s*test:|^\s*extra:|^\s*about:|^outputs:|\Z)
f = '(^requirements:.*?)(?=^test:|^extra:|^about:|^outputs:|\Z)'
return self.get_recipe_text(f, force_top_level=force_top_level)
def extract_outputs_text(self, apply_selectors=True):
return self.get_recipe_text(r'(^outputs:.*?)(?=^test:|^extra:|^about:|\Z)',
force_top_level=True, apply_selectors=apply_selectors)
def extract_source_text(self):
return self.get_recipe_text(
r'(\s*source:.*?)(?=^build:|^requirements:|^test:|^extra:|^about:|^outputs:|\Z)')
def extract_package_and_build_text(self):
return self.get_recipe_text(r'(^.*?)(?=^requirements:|^test:|^extra:|^about:|^outputs:|\Z)')
def extract_single_output_text(self, output_name, output_type, apply_selectors=True):
# first, need to figure out which index in our list of outputs the name matches.
# We have to do this on rendered data, because templates can be used in output names
recipe_text = self.extract_outputs_text(apply_selectors=apply_selectors)
output_matches = output_re.findall(recipe_text)
outputs = self.meta.get('outputs') or (self.parent_outputs if hasattr(self, 'parent_outputs') else None)
if not outputs:
outputs = [{'name': self.name()}]
try:
if output_type:
output_tuples = [(out.get('name', self.name()),
out.get('type', 'conda_v2' if self.config.conda_pkg_format == "2" else 'conda'))
for out in outputs]
output_index = output_tuples.index((output_name, output_type))
else:
output_tuples = [out.get('name', self.name()) for out in outputs]
output_index = output_tuples.index(output_name)
output = output_matches[output_index] if output_matches else ''
except ValueError:
if (not self.path and self.meta.get('extra', {}).get('parent_recipe')):
utils.get_logger(__name__).warn("Didn't match any output in raw metadata. Target value was: {}".format(output_name))
output = ''
else:
output = self.name()
return output
@property
def numpy_xx(self):
'''This is legacy syntax that we need to support for a while. numpy x.x means
"pin run as build" for numpy. It was special-cased to only numpy.'''
text = self.extract_requirements_text()
uses_xx = bool(numpy_xx_re.search(text))
return uses_xx
@property
def uses_numpy_pin_compatible_without_xx(self):
text = self.extract_requirements_text()
compatible_search = numpy_compatible_re.search(text)
max_pin_search = None
if compatible_search:
max_pin_search = numpy_compatible_x_re.search(text)
# compatible_search matches simply use of pin_compatible('numpy')
# max_pin_search quantifies the actual number of x's in the max_pin field. The max_pin
# field can be absent, which is equivalent to a single 'x'
return (bool(compatible_search),
max_pin_search.group(1).count('x') != 2 if max_pin_search else True)
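# Illustrative return values for the tuple above (assumed recipe snippets):
#   "{{ pin_compatible('numpy') }}"                -> (True, True): no max_pin given, treated as a single 'x'
#   "{{ pin_compatible('numpy', max_pin='x.x') }}" -> (True, False): two x's, so the 'without_xx' concern goes away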
@property
def uses_subpackage(self):
outputs = self.get_section('outputs')
in_reqs = False
for out in outputs:
if 'name' in out:
name_re = re.compile(r"^{}(\s|\Z|$)".format(out['name']))
in_reqs = any(name_re.match(req) for req in self.get_depends_top_and_out('run'))
if in_reqs:
break
subpackage_pin = False
if not in_reqs and self.meta_path:
data = self.extract_requirements_text(force_top_level=True)
if data:
subpackage_pin = re.search("{{\s*pin_subpackage\(.*\)\s*}}", data)
return in_reqs or bool(subpackage_pin)
@property
def uses_new_style_compiler_activation(self):
text = self.extract_requirements_text()
return bool(re.search(r'\{\{\s*compiler\(.*\)\s*\}\}', text))
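# e.g. a requirement written in meta.yaml as "{{ compiler('c') }}" matches the
# pattern above and marks the recipe as using new-style compiler activation.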
def validate_features(self):
if any('-' in feature for feature in ensure_list(self.get_value('build/features'))):
raise ValueError("- is a disallowed character in features. Please change this "
"character in your recipe.")
def copy(self):
new = copy.copy(self)
new.config = self.config.copy()
new.config.variant = copy.deepcopy(self.config.variant)
new.meta = copy.deepcopy(self.meta)
new.type = getattr(self, 'type', 'conda_v2' if self.config.conda_pkg_format == "2" else
'conda')
return new
@property
def noarch(self):
return self.get_value('build/noarch')
@noarch.setter
def noarch(self, value):
build = self.meta.get('build', {})
build['noarch'] = value
self.meta['build'] = build
if not self.noarch_python and not value:
self.config.reset_platform()
elif value:
self.config.host_platform = 'noarch'
@property
def noarch_python(self):
return self.get_value('build/noarch_python')
@noarch_python.setter
def noarch_python(self, value):
build = self.meta.get('build', {})
build['noarch_python'] = value
self.meta['build'] = build
if not self.noarch and not value:
self.config.reset_platform()
elif value:
self.config.host_platform = 'noarch'
@property
def variant_in_source(self):
variant = self.config.variant
self.config.variant = {}
self.parse_again(permit_undefined_jinja=True, allow_no_other_outputs=True,
bypass_env_check=True)
vars_in_recipe = set(self.undefined_jinja_vars)
self.config.variant = variant
for key in (vars_in_recipe & set(variant.keys())):
# We use this variant in the top-level recipe.
# constrain the stored variants to only this version in the output
# variant mapping
if re.search(r"\s*\{\{\s*%s\s*(?:.*?)?\}\}" % key, self.extract_source_text()):
return True
return False
@property
def pin_depends(self):
return self.get_value('build/pin_depends', '').lower()
@property
def source_provided(self):
return (not bool(self.meta.get('source')) or
(os.path.isdir(self.config.work_dir) and len(os.listdir(self.config.work_dir)) > 0))
def reconcile_metadata_with_output_dict(self, output_metadata, output_dict):
output_metadata.meta['package']['name'] = output_dict.get('name', self.name())
# make sure that subpackages do not duplicate tests from top-level recipe
test = output_metadata.meta.get('test', {})
if output_dict.get('name') != self.name() or not (output_dict.get('script') or
output_dict.get('files')):
if 'commands' in test:
del test['commands']
if 'imports' in test:
del test['imports']
# make sure that subpackages do not duplicate top-level entry-points or run_exports
build = output_metadata.meta.get('build', {})
transfer_keys = 'entry_points', 'run_exports', 'script'
for key in transfer_keys:
if key in output_dict:
build[key] = output_dict[key]
elif key in build:
del build[key]
output_metadata.meta['build'] = build
# reset this so that reparsing does not reset the metadata name
output_metadata._meta_path = ""
def get_output_metadata(self, output):
if output.get('name') == self.name():
output_metadata = self.copy()
output_metadata.type = output.get('type', 'conda_v2' if self.config.conda_pkg_format == "2" else
'conda')
else:
output_metadata = self.copy()
output_reqs = utils.expand_reqs(output.get('requirements', {}))
build_reqs = output_reqs.get('build', [])
host_reqs = output_reqs.get('host', [])
run_reqs = output_reqs.get('run', [])
constrain_reqs = output_reqs.get('run_constrained', [])
# pass through any other unrecognized req types
other_reqs = {k: v for k, v in output_reqs.items() if k not in
('build', 'host', 'run', 'run_constrained')}
if output.get('target'):
output_metadata.config.target_subdir = output['target']
if self.name() != output.get('name') or (output.get('script') or output.get('files')):
self.reconcile_metadata_with_output_dict(output_metadata, output)
output_metadata.type = output.get('type', 'conda_v2' if self.config.conda_pkg_format == "2" else
'conda')
if 'name' in output:
# since we are copying reqs from the top-level package, which
# can depend on subpackages, make sure that we filter out
# subpackages so that they don't depend on themselves
subpackage_pattern = re.compile(r'(?:^{}(?:\s|$|\Z))'.format(output['name']))
if build_reqs:
build_reqs = [req for req in build_reqs if not subpackage_pattern.match(req)]
if host_reqs:
host_reqs = [req for req in host_reqs if not subpackage_pattern.match(req)]
if run_reqs:
run_reqs = [req for req in run_reqs if not subpackage_pattern.match(req)]
requirements = {}
requirements.update({'build': build_reqs}) if build_reqs else None
requirements.update({'host': host_reqs}) if host_reqs else None
requirements.update({'run': run_reqs}) if run_reqs else None
requirements.update({'run_constrained': constrain_reqs}) if constrain_reqs else None
requirements.update(other_reqs)
output_metadata.meta['requirements'] = requirements
output_metadata.meta['package']['version'] = output.get('version') or self.version()
output_metadata.final = False
output_metadata.noarch = output.get('noarch', False)
output_metadata.noarch_python = output.get('noarch_python', False)
# primarily for tests - make sure that we keep the platform consistent (setting noarch
# would reset it)
if (not (output_metadata.noarch or output_metadata.noarch_python) and
self.config.platform != output_metadata.config.platform):
output_metadata.config.platform = self.config.platform
build = output_metadata.meta.get('build', {})
# legacy (conda build 2.1.x - 3.0.25). Newer stuff should just emulate
# the top-level recipe, with full sections for build, test, about
if 'number' in output:
build['number'] = output['number']
if 'string' in output:
build['string'] = output['string']
if 'run_exports' in output and output['run_exports']:
build['run_exports'] = output['run_exports']
if 'track_features' in output and output['track_features']:
build['track_features'] = output['track_features']
if 'features' in output and output['features']:
build['features'] = output['features']
# 3.0.26+ - just pass through the whole build section from the output.
# It clobbers everything else, aside from build number
if 'build' in output:
build = output['build']
if build is None:
build = {}
if 'number' not in build:
build['number'] = output.get('number', output_metadata.build_number())
output_metadata.meta['build'] = build
if 'test' in output:
output_metadata.meta['test'] = output['test']
if 'about' in output:
output_metadata.meta['about'] = output['about']
self.append_parent_metadata(output_metadata)
return output_metadata
def append_parent_metadata(self, out_metadata):
extra = self.meta.get('extra', {})
extra['parent_recipe'] = {'path': self.path, 'name': self.name(),
'version': self.version()}
out_metadata.meta['extra'] = extra
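# The resulting 'extra' section looks roughly like this (illustrative values):
#   extra:
#     parent_recipe:
#       path: /path/to/recipe
#       name: parent-pkg
#       version: 1.0.0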
def get_reduced_variant_set(self, used_variables):
# reduce variable space to limit work we need to do
full_collapsed_variants = variants.list_of_dicts_to_dict_of_lists(self.config.variants)
reduced_collapsed_variants = full_collapsed_variants.copy()
reduce_keys = set(self.config.variants[0].keys()) - set(used_variables)
zip_key_groups = self.config.variant.get('zip_keys', [])
zip_key_groups = ([zip_key_groups] if zip_key_groups and
isinstance(zip_key_groups[0], string_types) else zip_key_groups)
used_zip_key_groups = [group for group in zip_key_groups if any(
set(group) & set(used_variables))]
extend_keys = full_collapsed_variants.get('extend_keys', [])
reduce_keys = [key for key in reduce_keys if not any(key in group for group in
used_zip_key_groups) and key not in extend_keys]
for key in reduce_keys:
values = full_collapsed_variants.get(key)
if values is not None and len(values) and not hasattr(values, 'keys') and key != 'zip_keys':
# save only one element from this key
reduced_collapsed_variants[key] = utils.ensure_list(next(iter(values)))
out = variants.dict_of_lists_to_list_of_dicts(reduced_collapsed_variants)
return out
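# Sketch of the reduction with hypothetical variants: given
#   {'python': ['3.8', '3.9'], 'openssl': ['1.1', '3.0']}
# and used_variables == {'python'}, 'openssl' is not used anywhere, so it is
# collapsed to a single value and only two variant dicts remain instead of four.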
def get_output_metadata_set(self, permit_undefined_jinja=False,
permit_unsatisfiable_variants=False,
bypass_env_check=False):
from conda_build.source import provide
out_metadata_map = {}
if self.final:
outputs = get_output_dicts_from_metadata(self)[0]
output_tuples = [(outputs, self)]
else:
all_output_metadata = OrderedDict()
used_variables = self.get_used_loop_vars(force_global=True)
top_loop = self.get_reduced_variant_set(used_variables) or self.config.variants[:1]
for variant in (top_loop if (hasattr(self.config, 'variants') and self.config.variants) else [self.config.variant]):
ref_metadata = self.copy()
ref_metadata.config.variant = variant
if ref_metadata.needs_source_for_render and self.variant_in_source:
ref_metadata.parse_again()
utils.rm_rf(ref_metadata.config.work_dir)
provide(ref_metadata)
ref_metadata.parse_again()
try:
ref_metadata.parse_until_resolved(allow_no_other_outputs=True, bypass_env_check=True)
except SystemExit:
pass
outputs = get_output_dicts_from_metadata(ref_metadata)
try:
for out in outputs:
requirements = out.get('requirements')
if requirements:
requirements = utils.expand_reqs(requirements)
for env in ('build', 'host', 'run'):
insert_variant_versions(requirements, variant, env)
out['requirements'] = requirements
out_metadata = ref_metadata.get_output_metadata(out)
# keeping track of other outputs is necessary for correct functioning of the
# pin_subpackage jinja2 function. It's important that we store all of
# our outputs so that they can be referred to in later rendering. We
# also refine this collection as each output metadata object is
# finalized - see the finalize_outputs_pass function
all_output_metadata[(out_metadata.name(),
HashableDict({k: out_metadata.config.variant[k]
for k in out_metadata.get_used_vars()}))] = out, out_metadata
out_metadata_map[HashableDict(out)] = out_metadata
ref_metadata.other_outputs = out_metadata.other_outputs = all_output_metadata
except SystemExit:
if not permit_undefined_jinja:
raise
out_metadata_map = {}
assert out_metadata_map, ("Error: output metadata set is empty. Please file an issue"
" on the conda-build tracker at https://github.com/conda/conda-build/issues")
# format here is {output_dict: metadata_object}
render_order = toposort(out_metadata_map)
check_circular_dependencies(render_order)
conda_packages = OrderedDict()
non_conda_packages = []
for output_d, m in render_order.items():
if not output_d.get('type') or output_d['type'] in ('conda', 'conda_v2'):
conda_packages[m.name(), HashableDict({k: m.config.variant[k]
for k in m.get_used_vars()})] = (output_d, m)
elif output_d.get('type') == 'wheel':
if (not output_d.get('requirements', {}).get('build') or
not any('pip' in req for req in output_d['requirements']['build'])):
build_reqs = output_d.get('requirements', {}).get('build', [])
build_reqs.extend(['pip', 'python {}'.format(m.config.variant['python'])])
output_d['requirements'] = output_d.get('requirements', {})
output_d['requirements']['build'] = build_reqs
m.meta['requirements'] = m.meta.get('requirements', {})
m.meta['requirements']['build'] = build_reqs
non_conda_packages.append((output_d, m))
else:
# for other non-conda package types, just append them at the end.
# No deduplication with hashes currently. The hard part about exposing any
# part of output_d outside of this function is that it is harder to obtain
# an exact match elsewhere.
non_conda_packages.append((output_d, m))
# early stages don't need to do the finalization. Skip it until the later stages
# when we need it.
if not permit_undefined_jinja and not ref_metadata.skip():
conda_packages = finalize_outputs_pass(ref_metadata, conda_packages, pass_no=0,
permit_unsatisfiable_variants=permit_unsatisfiable_variants,
bypass_env_check=bypass_env_check)
# Sanity check: if any exact pins of any subpackages, make sure that they match
ensure_matching_hashes(conda_packages)
final_conda_packages = []
for (out_d, m) in conda_packages.values():
# We arbitrarily mark all output metadata as final, regardless
# of whether it truly is or not. This is done to add sane hashes
# to unfinalizable packages, so that they are differentiable
# from one another. This is more of a test concern than an
# actual one, as any "final" recipe returned here will still
# barf if anyone tries to actually build it.
m.final = True
final_conda_packages.append((out_d, m))
output_tuples = final_conda_packages + non_conda_packages
return output_tuples
def get_loop_vars(self):
_variants = (self.config.input_variants if hasattr(self.config, 'input_variants') else
self.config.variants)
return variants.get_vars(_variants, loop_only=True)
def get_used_loop_vars(self, force_top_level=False, force_global=False):
return {var for var in self.get_used_vars(force_top_level=force_top_level,
force_global=force_global)
if var in self.get_loop_vars()}
def get_rendered_recipe_text(self, permit_undefined_jinja=False, extract_pattern=None):
template_string = self.get_recipe_text(extract_pattern=extract_pattern,
force_top_level=True).rstrip()
return (yaml.safe_load(self._get_contents(permit_undefined_jinja=permit_undefined_jinja,
template_string=template_string,
skip_build_id=False)) or {})
def get_rendered_outputs_section(self, permit_undefined_jinja=False, variant=None):
extract_pattern = r'(.*)package:'
template_string = '\n'.join((self.get_recipe_text(extract_pattern=extract_pattern,
force_top_level=True),
# second item: the output text for this metadata
# object (might be output)
self.extract_outputs_text())).rstrip()
outputs = (yaml.safe_load(self._get_contents(permit_undefined_jinja=permit_undefined_jinja,
template_string=template_string,
skip_build_id=False,
allow_no_other_outputs=permit_undefined_jinja,
variant=variant)) or
{}).get('outputs', [])
return get_output_dicts_from_metadata(self, outputs=outputs)
def get_rendered_output(self, name, permit_undefined_jinja=False, variant=None):
"""This is for obtaining the rendered, parsed, dictionary-object representation of an
output. It's not useful for saying what variables are used. You need earlier, more raw
versions of the metadata for that. It is useful, however, for getting updated, re-rendered
contents of outputs."""
output = None
for output_ in self.get_rendered_outputs_section(
permit_undefined_jinja=permit_undefined_jinja, variant=variant):
if output_.get('name') == name:
output = output_
break
return output
@property
def force_ignore_keys(self):
return ensure_list(self.get_value('build/force_ignore_keys'))
@property
def force_use_keys(self):
return ensure_list(self.get_value('build/force_use_keys'))
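# Illustrative meta.yaml snippet (hypothetical recipe) for the two properties above:
#   build:
#     force_use_keys:
#       - python
# forces 'python' to be treated as a used variable even if it never appears in
# the recipe text; force_ignore_keys does the opposite.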
def get_used_vars(self, force_top_level=False, force_global=False):
global used_vars_cache
recipe_dir = self.path
# `HashableDict` does not handle lists of other dictionaries correctly. It is
# also constructed in place, taking references to sub-elements of the input dict
# and thus corrupting it. Since this was being called in 3 places in this
# function, caching it is probably a good thing.
hashed_variants = HashableDict(copy.deepcopy(self.config.variant))
if hasattr(self.config, 'used_vars'):
used_vars = self.config.used_vars
elif (self.name(), recipe_dir, force_top_level, force_global, self.config.subdir,
hashed_variants) in used_vars_cache:
used_vars = used_vars_cache[(self.name(), recipe_dir,
force_top_level, force_global, self.config.subdir,
hashed_variants)]
else:
meta_yaml_reqs = self._get_used_vars_meta_yaml(force_top_level=force_top_level,
force_global=force_global)
is_output = 'package:' not in self.get_recipe_text()
if is_output:
script_reqs = self._get_used_vars_output_script()
else:
script_reqs = self._get_used_vars_build_scripts()
used_vars = meta_yaml_reqs | script_reqs
# force target_platform to always be included, because it determines behavior
if ('target_platform' in self.config.variant and
any(plat != self.config.subdir for plat in
self.get_variants_as_dict_of_lists()['target_platform'])):
used_vars.add('target_platform')
if self.force_use_keys or self.force_ignore_keys:
used_vars = (used_vars - set(self.force_ignore_keys)) | set(self.force_use_keys)
used_vars_cache[(self.name(), recipe_dir, force_top_level, force_global,
self.config.subdir, hashed_variants)] = used_vars
return used_vars
def _get_used_vars_meta_yaml_helper(self, force_top_level=False, force_global=False,
apply_selectors=False):
if force_global:
recipe_text = self.get_recipe_text(force_top_level=force_top_level,
apply_selectors=apply_selectors)
# a bit hacky. When we force global, we don't distinguish
# between requirements and the rest
reqs_text = recipe_text
else:
if self.is_output and not force_top_level:
recipe_text = self.extract_single_output_text(self.name(), getattr(self, 'type', None),
apply_selectors=apply_selectors)
else:
recipe_text = (self.get_recipe_text(force_top_level=force_top_level,
apply_selectors=apply_selectors).replace(
self.extract_outputs_text(
apply_selectors=apply_selectors).strip(), '') +
self.extract_single_output_text(self.name(), getattr(self, 'type', None),
apply_selectors=apply_selectors))
reqs_re = re.compile(r"requirements:.+?(?=^\w|\Z|^\s+-\s(?=name|type))",
flags=re.M | re.S)
reqs_text = reqs_re.search(recipe_text)
reqs_text = reqs_text.group() if reqs_text else ''
return reqs_text, recipe_text
def _get_used_vars_meta_yaml(self, force_top_level=False, force_global=False):
# make variant dict hashable so that memoization works
variant_keys = tuple(sorted(self.config.variant.keys()))
reqs_text, recipe_text = self._get_used_vars_meta_yaml_helper(
force_top_level=force_top_level, force_global=force_global, apply_selectors=False)
all_used_selectors = variants.find_used_variables_in_text(variant_keys, recipe_text,
selectors=True)
reqs_text, recipe_text = self._get_used_vars_meta_yaml_helper(
force_top_level=force_top_level, force_global=force_global, apply_selectors=True)
all_used_reqs = variants.find_used_variables_in_text(variant_keys, recipe_text,
selectors=False)
all_used = all_used_reqs.union(all_used_selectors)
# things that are only used in requirements need further consideration,
# for omitting things that are only used in run
if force_global:
used = all_used
else:
requirements_used = variants.find_used_variables_in_text(variant_keys, reqs_text)
outside_reqs_used = all_used - requirements_used
requirements_used = trim_build_only_deps(self, requirements_used)
used = outside_reqs_used | requirements_used
return used
def _get_used_vars_build_scripts(self):
used_vars = set()
buildsh = os.path.join(self.path, 'build.sh')
if os.path.isfile(buildsh):
used_vars.update(variants.find_used_variables_in_shell_script(self.config.variant,
buildsh))
bldbat = os.path.join(self.path, 'bld.bat')
if self.config.platform == 'win' and os.path.isfile(bldbat):
used_vars.update(variants.find_used_variables_in_batch_script(self.config.variant,
bldbat))
return used_vars
def _get_used_vars_output_script(self):
this_output = self.get_rendered_output(self.name(),
permit_undefined_jinja=True) or {}
used_vars = set()
if 'script' in this_output:
script = os.path.join(self.path, this_output['script'])
if os.path.splitext(script)[1] == '.sh':
used_vars.update(variants.find_used_variables_in_shell_script(self.config.variant,
script))
elif os.path.splitext(script)[1] == '.bat':
used_vars.update(variants.find_used_variables_in_batch_script(self.config.variant,
script))
else:
log = utils.get_logger(__name__)
log.warn('Not detecting used variables in output script {}; conda-build only knows '
'how to search .sh and .bat files right now.'.format(script))
return used_vars
def get_variants_as_dict_of_lists(self):
return variants.list_of_dicts_to_dict_of_lists(self.config.variants)
def clean(self):
"""This ensures that clean is called with the correct build id"""
self.config.clean()
@property
def activate_build_script(self):
b = self.meta.get('build', {}) or {}
should_activate = (self.uses_new_style_compiler_activation or b.get('activate_in_script') is not False)
return bool(self.config.activate and should_activate) and not self.name() == 'conda'
@property
def build_is_host(self):
manual_overrides = self.meta.get('build', {}).get('merge_build_host') is True or self.config.build_is_host
manually_disabled = self.meta.get('build', {}).get('merge_build_host') is False
return manual_overrides or (self.config.subdirs_same and not manually_disabled and
'host' not in self.meta.get('requirements', {}) and not self.uses_new_style_compiler_activation)
def get_top_level_recipe_without_outputs(self):
recipe_no_outputs = self.get_recipe_text(force_top_level=True).replace(
self.extract_outputs_text(), "")
top_no_outputs = {}
# because we're an output, calls to PKG_NAME used in the top-level
# content will reflect our current name, not the top-level name. We
# fix that here by replacing any PKG_NAME instances with the known
# parent name
parent_recipe = self.meta.get('extra', {}).get('parent_recipe', {})
alt_name = parent_recipe['name'] if self.is_output else None
if recipe_no_outputs:
top_no_outputs = yaml.safe_load(self._get_contents(False,
template_string=recipe_no_outputs,
alt_name=alt_name))
return top_no_outputs or {}
def get_test_deps(self, py_files, pl_files, lua_files, r_files):
specs = ['%s %s %s' % (self.name(), self.version(), self.build_id())]
# add packages listed in the run environment and test/requires
specs.extend(ms.spec for ms in self.ms_depends('run'))
specs += utils.ensure_list(self.get_value('test/requires', []))
if py_files:
# as the tests are run by python, ensure that python is installed.
# (If they already provided python as a run or test requirement,
# this won't hurt anything.)
specs += ['python']
if pl_files:
# as the tests are run by perl, we need to specify it
specs += ['perl']
if lua_files:
# not sure how this shakes out
specs += ['lua']
if r_files and not any(s.split()[0] in ('r-base', 'mro-base') for s in specs):
# not sure how this shakes out
specs += ['r-base']
specs.extend(utils.ensure_list(self.config.extra_deps))
return specs
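# Illustrative result for a hypothetical package 'mypkg' with python test files:
#   ['mypkg 1.0 py38_0', <run deps>, <test/requires entries>, 'python']
# i.e. the package pinned to its exact build string, its run dependencies, the
# test requirements, plus the interpreters implied by the test files present.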
| {
"repo_name": "pelson/conda-build",
"path": "conda_build/metadata.py",
"copies": "1",
"size": "106861",
"license": "bsd-3-clause",
"hash": -466495916882600060,
"line_mean": 45.3605206074,
"line_max": 133,
"alpha_frac": 0.5663712673,
"autogenerated": false,
"ratio": 4.08053306858103,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.002113522798850294,
"num_lines": 2305
} |
from __future__ import (absolute_import, division, print_function)
from collections import OrderedDict
import copy
import numpy as np
from qtpy.QtWidgets import QApplication, QMainWindow
from addie.utilities import load_ui
from qtpy import QtGui, QtCore
from addie.databases.oncat.oncat import OncatErrorMessageWindow
from addie.databases.oncat.oncat import pyoncatGetIptsList
from addie.processing.mantid.master_table.table_row_handler import TableRowHandler
from addie.processing.mantid.master_table.master_table_loader import LoaderOptionsInterface
from addie.processing.mantid.master_table.import_from_database.global_rule_handler import GlobalRuleHandler
from addie.processing.mantid.master_table.import_from_database.table_search_engine import TableSearchEngine
from addie.processing.mantid.master_table.import_from_database.oncat_template_retriever import OncatTemplateRetriever
from addie.processing.mantid.master_table.import_from_database.gui_handler import GuiHandler, ImportFromDatabaseTableHandler
from addie.processing.mantid.master_table.import_from_database.import_table_from_oncat_handler import ImportTableFromOncat
from addie.processing.mantid.master_table.import_from_database.table_widget_rule_handler import TableWidgetRuleHandler
from addie.processing.mantid.master_table.import_from_database.apply_rule_handler import ApplyRuleHandler
from addie.processing.mantid.master_table.import_from_database.data_to_import_handler import DataToImportHandler
from addie.processing.mantid.master_table.import_from_database.format_json_from_database_to_master_table \
import FormatJsonFromDatabaseToMasterTable
from addie.utilities.gui_handler import TableHandler
try:
from addie.processing.mantid.master_table.import_from_database.oncat_authentication_handler import OncatAuthenticationHandler
import pyoncat
ONCAT_ENABLED = True
except ImportError:
print('pyoncat module not found. Functionality disabled')
ONCAT_ENABLED = False
class ImportFromDatabaseHandler:
def __init__(self, parent=None):
if parent.import_from_database_ui is None:
o_import = ImportFromDatabaseWindow(parent=parent)
parent.import_from_database_ui = o_import
if parent.import_from_database_ui_position:
parent.import_from_database_ui.move(parent.import_from_database_ui_position)
o_import.show()
else:
parent.import_from_database_ui.setFocus()
parent.import_from_database_ui.activateWindow()
class ImportFromDatabaseWindow(QMainWindow):
filter_column_widths = [5, 60, 200, 100, 300]
row_height = 40
button_height = 30
button_width = 150
list_ui = {}
ipts_exist = True
nexus_json = {}
nexus_json_from_template = {}
metadata = {}
list_of_nexus_found = []
list_of_nexus_not_found = []
list_of_nexus_filtered_out = []
# first time filling the metadata filter table
first_time_filling_table = True
first_time_filling_preview_table = True
oncat_template = {}
global_rule_dict = {} # will look like {'0': {'name': 'group 0',
# 'list_rules': ['#0'],
# 'inner_rule': 'and',
# 'outer_rule': 'and'},
# }
list_of_rows_with_global_rule = set() # final list of rows to use to import rows into master table
def __init__(self, parent=None):
self.parent = parent
QMainWindow.__init__(self, parent=parent)
self.ui = load_ui('import_from_database.ui', baseinstance=self)
self.init_widgets()
QApplication.processEvents()
self.init_oncat_template()
def next_function(self):
self.init_oncat_template()
def init_oncat_template(self):
"""In order to display in the first tab all the metadata just like ONCat does
on the web site, we need to retrieve the same template as ONCat uses. This is
what is going on right here"""
o_retriever = OncatTemplateRetriever(parent=self.parent)
self.oncat_template = o_retriever.get_template_information()
if self.oncat_template == {}:
OncatAuthenticationHandler(parent=self.parent,
next_ui='from_database_ui',
next_function=self.next_function)
self.parent.oncat_authentication_ui.activateWindow()
self.parent.oncat_authentication_ui.setFocus()
else:
self.radio_button_changed()
self.init_list_ipts()
def init_list_ipts(self):
# retrieve list and display of IPTS for this user
instrument = self.parent.instrument['short_name']
facility = self.parent.facility
list_ipts = pyoncatGetIptsList(oncat=self.parent.oncat,
instrument=instrument,
facility=facility)
self.list_ipts = list_ipts
self.ui.ipts_combobox.addItems(list_ipts)
def init_widgets(self):
if self.parent.oncat is None:
return
self.ui.tableWidget.setColumnHidden(0, True)
self.ui.error_message.setStyleSheet("color: red")
self.ui.error_message.setVisible(False)
# add icons on top of widgets (clear, search)
self.ui.clear_ipts_button.setIcon(QtGui.QIcon(":/MPL Toolbar/clear_icon.png"))
self.ui.clear_run_button.setIcon(QtGui.QIcon(":/MPL Toolbar/clear_icon.png"))
self.ui.search_logo_label.setPixmap(QtGui.QPixmap(":/MPL Toolbar/search_icon.png"))
self.ui.clear_search_button.setIcon(QtGui.QIcon(":/MPL Toolbar/clear_icon.png"))
for _col, _width in enumerate(self.filter_column_widths):
self.ui.tableWidget.setColumnWidth(_col, _width)
self.ui.splitter.setStyleSheet("""
QSplitter::handle {
image: url(':/MPL Toolbar/splitter_icon.png');
}
""")
def insert_in_master_table(self, nexus_json=[]):
if nexus_json == []:
return
runs_dict = self.build_result_dictionary(nexus_json=nexus_json)
o_row = TableRowHandler(parent=self.parent)
for _run in runs_dict.keys():
_chemical_formula = runs_dict[_run]['chemical_formula']
_mass_density = runs_dict[_run]['mass_density']
_run = "{}".format(_run)
o_row.fill_row(sample_runs=_run,
sample_chemical_formula=_chemical_formula,
sample_mass_density=_mass_density)
def define_unique_rule_name(self, row):
"""this method makes sure that the name of the rule defined is unique and does not exist already"""
nbr_row = self.ui.tableWidget.rowCount()
list_rule_name = []
for _row in np.arange(nbr_row):
if self.ui.tableWidget.item(_row, 1):
_rule_name = str(self.ui.tableWidget.item(_row, 1).text())
list_rule_name.append(_rule_name)
offset = 0
while True:
if ("{}".format(offset+row)) in list_rule_name:
offset += 1
else:
return offset+row
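# Example with hypothetical table content: if rules named "0", "1" and "3"
# already exist (a row was removed earlier) and row == 3, "3" is already taken
# (offset 0) but "4" is free, so 4 is returned.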
def refresh_global_rule(self, full_reset=False, new_row=-1):
if full_reset:
list_rule_number = []
nbr_row = self.ui.tableWidget.rowCount()
for _row in np.arange(nbr_row):
rule_number = "#{}".format(str(self.ui.tableWidget.item(_row, 1).text()))
list_rule_number.append(rule_number)
global_rule = " and ".join(list_rule_number)
else:
current_global_rule = str(self.ui.global_rule_lineedit.text())
name_of_new_row = str(self.ui.tableWidget.item(new_row, 1).text())
if current_global_rule == "":
global_rule = "#{}".format(name_of_new_row)
else:
global_rule = current_global_rule + " and #{}".format(name_of_new_row)
self.ui.global_rule_lineedit.setText(global_rule)
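# Example with hypothetical rule names: with rows named "0" and "1" in the
# table, a full reset rebuilds the global rule as "#0 and #1"; when a new row
# named "2" is added, " and #2" is appended to the current string (or it
# becomes "#2" if the string was empty).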
def check_all_filter_widgets(self):
self.check_remove_widget()
self.check_rule_widgets()
def check_rule_widgets(self):
nbr_row = self.ui.tableWidget.rowCount()
enable_global_rule_label = False
enable_global_rule_value = False
enable_global_rule_button = False
if nbr_row == 0:
pass
elif nbr_row == 1:
enable_global_rule_label = True
enable_global_rule_value = True
else:
enable_global_rule_label = True
enable_global_rule_value = True
enable_global_rule_button = True
self.ui.global_rule_label.setEnabled(enable_global_rule_label)
self.ui.global_rule_lineedit.setEnabled(enable_global_rule_value)
self.ui.global_rule_button.setEnabled(enable_global_rule_button)
def check_remove_widget(self):
nbr_row = self.ui.tableWidget.rowCount()
if nbr_row > 0:
self.ui.remove_criteria_button.setEnabled(True)
else:
self.ui.remove_criteria_button.setEnabled(False)
def files_not_found_more_clicked(self):
list_of_runs_not_found = self.list_of_runs_not_found
self.inform_of_list_of_runs(list_of_runs=list_of_runs_not_found,
message='List of NeXus not found!')
def files_filtered_out_more_clicked(self):
pass
def files_imported_more_clicked(self):
pass
def files_initially_selected_more_clicked(self):
list_of_nexus_found = self.list_of_runs_found
self.inform_of_list_of_runs(list_of_runs=list_of_nexus_found,
message='List of NeXus found!')
def inform_of_list_of_runs(self, list_of_runs='', message=''):
if list_of_runs == '':
return
o_info = OncatErrorMessageWindow(parent=self,
list_of_runs=list_of_runs,
message=message)
o_info.show()
def refresh_preview_table_of_runs(self):
"""using either the IPTS number selected or the runs defined, this will use the ONCat template to
retrieve all the information from the template and populate the preview table """
QApplication.setOverrideCursor(QtCore.Qt.WaitCursor)
QApplication.processEvents()
o_import = ImportTableFromOncat(parent=self)
try:
o_import.from_oncat_template()
nexus_json = self.nexus_json_from_template
self.nexus_json_all_infos = nexus_json
enabled_widgets = False
if not (nexus_json == {}):
enabled_widgets = True
GuiHandler.preview_widget_status(self.ui, enabled_widgets=enabled_widgets)
self.refresh_preview_table(nexus_json=copy.deepcopy(nexus_json))
QApplication.restoreOverrideCursor()
QApplication.processEvents()
except pyoncat.InvalidRefreshTokenError:
QApplication.restoreOverrideCursor()
QApplication.processEvents()
OncatAuthenticationHandler(parent=self.parent,
next_function=self.refresh_preview_table_of_runs)
def refresh_filter_page(self):
QApplication.setOverrideCursor(QtCore.Qt.WaitCursor)
QApplication.processEvents()
if self.ui.import_button.isEnabled():
o_import = ImportTableFromOncat(parent=self)
o_import.from_oncat_config(insert_in_table=False)
nexus_json = self.nexus_json
enabled_widgets = False
if not (nexus_json == {}):
enabled_widgets = True
GuiHandler.filter_widget_status(self.ui, enabled_widgets=enabled_widgets)
self.refresh_filter_table(nexus_json=copy.deepcopy(nexus_json))
self.update_rule_filter()
QApplication.restoreOverrideCursor()
QApplication.processEvents()
def refresh_preview_table(self, nexus_json=[]):
"""this function will use the template returned by ONCat during the initialization of this
window and will, for all the runs specified, or all teh runs of the given IPTS, all the metadata
defined in that template"""
table_ui = self.ui.tableWidget_all_runs
o_handler = ImportFromDatabaseTableHandler(table_ui=table_ui,
parent=self)
o_handler.refresh_preview_table(nexus_json=nexus_json)
def refresh_filter_table(self, nexus_json=[]):
"""This function takes the nexus_json returns by ONCat and
fill the filter table with only the metadata of interests. Those
are defined in the oncat_metadata_filters dictionary (coming from the json config)
ex: title, chemical formula, mass density, Sample Env. Device and proton charge
"""
if nexus_json == []:
nexus_json = self.nexus_json
table_ui = self.ui.tableWidget_filter_result
o_handler = ImportFromDatabaseTableHandler(table_ui=table_ui,
parent=self)
o_handler.refresh_table(nexus_json=nexus_json)
def update_rule_filter(self):
o_rule = ApplyRuleHandler(parent=self)
o_rule.apply_global_rule()
def update_global_rule(self, row=-1, is_removed=False, is_added=False):
"""when user adds or removes a rule (criteria), we need to update the global rule dictionary"""
o_rule_handler = ApplyRuleHandler(parent=self)
o_rule_handler.change_rule(row=row, is_removed=is_removed, is_added=is_added)
global_rule_string = o_rule_handler.create_global_rule_string()
self.ui.global_rule_lineedit.setText(global_rule_string)
def create_global_rule_string(self):
o_rule_handler = ApplyRuleHandler(parent=self)
global_rule_string = o_rule_handler.create_global_rule_string()
return global_rule_string
# EVENT HANDLER CREATED DURING RUN TIME ----------------------------
def list_argument_changed(self, value, key):
self.update_rule_filter()
GuiHandler.check_import_button(self)
def list_argument_index_changed(self, value, key):
self.update_rule_filter()
GuiHandler.check_import_button(self)
def list_criteria_changed(self, value, key):
self.update_rule_filter()
GuiHandler.check_import_button(self)
def list_item_changed(self, index, key):
"""this method is reached when the user changes the name of the variable he wants to filter"""
o_table = TableWidgetRuleHandler(parent=self)
o_table.update_list_value_of_given_item(index=index, key=key)
self.update_rule_filter()
GuiHandler.check_import_button(self)
# EVENT HANDLER ---------------------------------------------------
def change_user_clicked(self):
OncatAuthenticationHandler(parent=self.parent)
def radio_button_changed(self):
QApplication.setOverrideCursor(QtCore.Qt.WaitCursor)
QApplication.processEvents()
ipts_widgets_status = False
run_widgets_status = True
if self.ui.ipts_radio_button.isChecked():
ipts_widgets_status = True
run_widgets_status = False
if str(self.ui.ipts_lineedit.text()).strip() != "":
self.ipts_text_return_pressed()
else:
self.ipts_selection_changed()
else:
self.ui.error_message.setVisible(False)
self.run_number_return_pressed()
self.ui.ipts_combobox.setEnabled(ipts_widgets_status)
self.ui.ipts_lineedit.setEnabled(ipts_widgets_status)
self.ui.ipts_label.setEnabled(ipts_widgets_status)
self.ui.clear_ipts_button.setEnabled(ipts_widgets_status)
self.ui.run_number_lineedit.setEnabled(run_widgets_status)
self.ui.run_number_label.setEnabled(run_widgets_status)
self.ui.clear_run_button.setEnabled(run_widgets_status)
GuiHandler.check_import_button(self)
QApplication.restoreOverrideCursor()
QApplication.processEvents()
def clear_ipts(self):
self.ui.ipts_lineedit.setText("")
self.refresh_preview_table_of_runs()
def clear_run(self):
self.ui.run_number_lineedit.setText("")
self.refresh_preview_table_of_runs()
def remove_criteria_clicked(self):
_select = self.ui.tableWidget.selectedRanges()
if not _select:
return
row = _select[0].topRow()
_random_key = str(self.ui.tableWidget.item(row, 0).text())
self.list_ui.pop(_random_key, None)
self.update_global_rule(row=row, is_removed=True)
self.ui.tableWidget.removeRow(row)
self.check_all_filter_widgets()
self.refresh_global_rule(full_reset=True)
self.update_rule_filter()
GuiHandler.check_import_button(self)
def add_criteria_clicked(self):
nbr_row = self.ui.tableWidget.rowCount()
o_table_handler = TableWidgetRuleHandler(parent=self)
o_table_handler.add_row(row=nbr_row)
self.check_rule_widgets()
self.update_global_rule(row=nbr_row, is_added=True)
self.update_rule_filter()
GuiHandler.check_import_button(self)
def ipts_selection_changed(self, ipts_selected=""):
self.ui.ipts_lineedit.setText("")
self.refresh_preview_table_of_runs()
self.search_return_pressed()
def ipts_text_return_pressed(self):
self.refresh_preview_table_of_runs()
self.search_return_pressed()
def ipts_text_changed(self, ipts_text):
if ipts_text.strip() != "":
str_ipts = "IPTS-{}".format(ipts_text.strip())
ipts_exist = False
if str_ipts in self.list_ipts:
ipts_exist = True
index = self.ui.ipts_combobox.findText(str_ipts)
self.ui.ipts_combobox.blockSignals(True)
self.ui.ipts_combobox.setCurrentIndex(index)
self.ui.ipts_combobox.blockSignals(False)
else:
ipts_exist = True # we will use the combobox IPTS
self.ipts_exist = ipts_exist
GuiHandler.check_import_button(self)
def run_number_return_pressed(self):
self.refresh_preview_table_of_runs()
self.search_return_pressed()
def run_number_text_changed(self, text):
GuiHandler.check_import_button(self)
def edit_global_rule_clicked(self):
GlobalRuleHandler(parent=self)
def search_return_pressed(self):
new_text = str(self.ui.name_search.text())
self.search_text_changed(new_text)
GuiHandler.check_import_button(self)
def search_text_changed(self, new_text):
new_text = str(new_text)
o_search = TableSearchEngine(table_ui=self.ui.tableWidget_all_runs)
list_row_matching_string = o_search.locate_string(new_text)
o_table = TableHandler(table_ui=self.ui.tableWidget_all_runs)
o_table.show_list_of_rows(list_of_rows=list_row_matching_string)
def clear_search_text(self):
self.ui.name_search.setText("")
self.search_return_pressed()
def toolbox_changed(self, index):
if index == 0:
self.nexus_json = {}
self.ui.import_button.setText("Import All Runs")
elif index == 1:
self.ui.import_button.setText("Import Filtered Runs")
self.refresh_filter_page()
GuiHandler.check_import_button(self)
def build_result_dictionary(self, nexus_json=[]):
"""isolate the infos I need from ONCat result to insert in the main window, master table"""
result_dict = OrderedDict()
for _json in nexus_json:
chemical_formula = "{}".format(_json['metadata']['entry']['sample']['chemical_formula'])
mass_density = "{}".format(_json['metadata']['entry']['sample']['mass_density'])
result_dict[_json['indexed']['run_number']] = {'chemical_formula': chemical_formula,
'mass_density': mass_density
}
return result_dict
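# The returned structure looks like this (illustrative values):
#   OrderedDict([(12345, {'chemical_formula': 'Si', 'mass_density': '2.33'})])
# keyed by run number, which insert_in_master_table then walks row by row.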
def import_button_clicked(self):
data_handler = DataToImportHandler(parent=self)
self.json_of_data_to_import = data_handler.get_json_of_data_to_import()
o_dialog = AsciiLoaderOptions(parent=self,
is_parent_main_ui=False,
real_parent=self)
o_dialog.show()
def import_into_master_table(self):
o_format = FormatJsonFromDatabaseToMasterTable(parent=self)
o_format.run(json=self.json_of_data_to_import,
import_option=self.parent.ascii_loader_option)
self.parent.from_oncat_to_master_table(json=o_format.final_json,
with_conflict=o_format.any_conflict)
self.close()
def cancel_button_clicked(self):
self.close()
def closeEvent(self, c):
self.parent.import_from_database_ui = None
self.parent.import_from_database_ui_position = self.pos()
class AsciiLoaderOptions(LoaderOptionsInterface):
def accept(self):
self.parent.ascii_loader_option = self.get_option_selected()
self.real_parent.import_into_master_table()
self.close()
self.parent.check_master_table_column_highlighting()
| {
"repo_name": "neutrons/FastGR",
"path": "addie/processing/mantid/master_table/import_from_database/import_from_database_handler.py",
"copies": "1",
"size": "21666",
"license": "mit",
"hash": 7578083880727924000,
"line_mean": 38.9005524862,
"line_max": 129,
"alpha_frac": 0.625311548,
"autogenerated": false,
"ratio": 3.851048702452897,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9972464178979754,
"avg_score": 0.0007792142946284163,
"num_lines": 543
} |
from __future__ import absolute_import, division, print_function
from collections import OrderedDict
import math
from pymel.core import duplicate, dt, group, hide, joint, ikHandle, listConnections, makeIdentity, move, orientConstraint, parent, parentConstraint, PyNode, skinCluster, xform
from .... import core
from .... import lib
from .... import nodeApi
from .. import controllerShape
from .. import space
from ..cardRigging import MetaControl, ParamInfo
from . import _util as util
from .. import node
from .. import rig
@util.adds('stretch')
@util.defaultspec( {'shape': 'box', 'color': 'orange 0.22', 'size': 10 },
middle={'shape': 'sphere', 'color': 'green 0.22', 'size': 7 },
offset={'shape': 'pin', 'color': 'orange 0.22', 'size': 3 },
end={'shape': 'box', 'color': 'orange 0.22', 'size': 10 }, )
def buildSplineChest(start, end, name='Chest', indexOfRibCage=-1, useTrueZero=True, groupName='', controlSpec={}):
'''
Makes a spline from the start to the `indexOfRibCage` joint, and
TODO
- the remaining joints get fk controllers (so you can make the spine and neck all one card, I guess)
'''
srcChain = util.getChain( start, end )
chain = util.dupChain( start, end, '{0}_spline' )
chestBase = chain[indexOfRibCage]
chestIndex = chain.index(chestBase)
if chestIndex % 2 == 0:
# Due to `division`, have to cast to int
midPos = xform(chain[int(chestIndex / 2)], q=True, ws=True, t=True)
midRot = xform(chain[int(chestIndex / 2)], q=True, ws=True, ro=True)
else:
tempIndex = int( math.floor(chestIndex / 2) )
low = chain[ tempIndex ]
high = chain[ tempIndex + 1 ]
midPos = dt.Vector( xform(low, q=True, ws=True, t=True) )
midPos += dt.Vector( xform(high, q=True, ws=True, t=True) )
midPos = dt.Vector(midPos) * .5
'''&&&
To be safe, find the closest axis on the second obj
Get average z basis, forward
then average y basis, up
calc x, side
recalc y, up
This is the world matrix of the average rotation'''
midRot = xform(low, q=True, ws=True, ro=True)
#raise Exception('Need to implement even number of stomach joints')
container = group(em=True, p=node.mainGroup(), n=name + "_controls")
container.inheritsTransform.set(False)
container.inheritsTransform.lock()
chain[0].setParent(container)
mainIk, _effector, crv = ikHandle( sol='ikSplineSolver',
sj=chain[0],
ee=chestBase,
ns=3,
simplifyCurve=False)
crvShape = crv.getShape()
crvShape.overrideEnabled.set(True)
crvShape.overrideDisplayType.set(2)
parent( mainIk, crv, container )
# -- Base -- # I don't think there is any benefit to controlling this, but it might just be my weighting.
base = joint(None, n='Base')
core.dagObj.moveTo(base, chain[0])
base.setParent( container )
parentConstraint( start.getParent(), base, mo=True)
hide(base)
# -- Chest control --
chestCtrl = controllerShape.build( name + '_main', controlSpec['main'], controllerShape.ControlType.SPLINE )
chestCtrl.setParent(container)
util.makeStretchySpline( chestCtrl, mainIk )
chestCtrl.stretch.set(1)
chestCtrl.stretch.lock()
chestCtrl.stretch.setKeyable(False)
core.dagObj.lockScale(chestCtrl)
# Put pivot point at the bottom
chestCtrl.ty.set( chestCtrl.boundingBox()[1][1] )
lib.sharedShape.remove(chestCtrl)
chestCtrl.setPivots( [0, 0, 0], worldSpace=True )
makeIdentity( chestCtrl, a=True, t=True )
lib.sharedShape.use(chestCtrl)
move( chestCtrl, xform(chestBase, q=True, ws=True, t=True), rpr=True )
core.dagObj.zero(chestCtrl)
if useTrueZero:
rot = util.determineClosestWorldOrient(chestBase)
util.storeTrueZero(chestCtrl, rot)
core.dagObj.rezero( chestCtrl ) # Not sure why this is needed but otherwise the translate isn't zeroed
chestCtrl.r.set( rot )
chest = joint(None, n='Chest')
chest.setParent( chestCtrl )
core.dagObj.moveTo(chest, chestBase)
core.dagObj.lockScale(core.dagObj.lockRot(core.dagObj.lockTrans(chest)))
hide(chest)
chestMatcher = util.createMatcher(chestCtrl, srcChain[chestIndex])
chestMatcher.setParent(container)
# Chest spaces need to happen after it's done being manipulated into place
space.add( chestCtrl, start.getParent(), 'local' )
space.add( chestCtrl, start.getParent(), 'local_posOnly', mode=space.Mode.TRANSLATE )
space.addMain( chestCtrl ) # Not sure this space is useful...
space.addTrueWorld( chestCtrl )
space.add( chestCtrl, start.getParent(), 'worldRotate', mode=space.Mode.ALT_ROTATE, rotateTarget=space.getMainGroup())
# -- Chest Offset -- &&& Currently hard coded to make a single offset joint
chestOffsetCtrl = None
if chestIndex < (len(chain) - 1):
chestOffsetCtrl = controllerShape.build( name + '_bend', controlSpec['offset'], controllerShape.ControlType.SPLINE )
chestOffsetCtrl.setParent(chestCtrl)
core.dagObj.matchTo( chestOffsetCtrl, chain[-1])
#move(chestOffsetCtrl, [0, 0.7, 3], r=True)
core.dagObj.zero(chestOffsetCtrl)
core.dagObj.lockScale(chestOffsetCtrl)
parentConstraint(chestOffsetCtrl, chain[-1], mo=True)
# -- Mid --
midCtrl = controllerShape.build( name + '_mid', controlSpec['middle'], controllerShape.ControlType.SPLINE )
#core.dagObj.matchTo( midCtrl, midPoint )
xform( midCtrl, ws=True, t=midPos )
core.dagObj.lockScale(midCtrl)
midCtrl.setParent( container )
mid = joint(None, n='Mid')
#core.dagObj.moveTo( mid, midPoint )
xform( mid, ws=True, t=midPos )
mid.setParent( midCtrl )
core.dagObj.lockScale(core.dagObj.lockRot(core.dagObj.lockTrans(mid)))
hide(mid)
# Mid control's rotation aims at the chest
core.dagObj.zero(midCtrl)
aimer = util.midAimer(base, chestCtrl, midCtrl)
aimer.setParent(container)
hide(aimer)
space.add(midCtrl, aimer, spaceName='default')
userDriven = space.addUserDriven(midCtrl, 'extreme') # Best name I got, extreme poses!
parentConstraint( base, chestCtrl, userDriven, mo=True, skipRotate=('x', 'y', 'z'))
orientConstraint( base, chestCtrl, userDriven, mo=True)
"""
# -- Shoulders --
if numChestJoints > 2: # The shoulder control is skipped if there aren't enough joints
shoulderCtrl = controllerShape.build( name + '_shoulders', controlSpec['end'], controllerShape.ControlType.SPLINE )
core.dagObj.matchTo( shoulderCtrl, srcChain[-2]) # We want to use the penultimate joint orientation
core.dagObj.moveTo( shoulderCtrl, end)
controllerShape.scaleAllCVs( shoulderCtrl, x=0.15 )
shoulderZero = core.dagObj.zero(shoulderCtrl)
shoulderZero.setParent(chestCtrl)
core.dagObj.lockScale(core.dagObj.lockTrans(shoulderCtrl))
neck = joint(None, n='Neck')
neck.setParent( shoulderCtrl )
core.dagObj.moveTo( neck, end )
core.dagObj.lockScale(core.dagObj.lockRot(core.dagObj.lockTrans(neck)))
hide(neck)
# -- Neck --
neckCtrl = controllerShape.build( name + '_neck', controlSpec['neck'], controllerShape.ControlType.ROTATE )
core.dagObj.matchTo( neckCtrl, end)
if numChestJoints > 2: # The shoulder control is skipped if there aren't enough joints
core.dagObj.zero(neckCtrl).setParent( shoulderCtrl )
core.dagObj.lockScale(core.dagObj.lockTrans(neckCtrl))
space.add( neckCtrl, srcChain[-2], 'chest' )
else:
core.dagObj.zero(neckCtrl).setParent( chestCtrl )
core.dagObj.lockScale(core.dagObj.lockTrans(neckCtrl))
space.add( neckCtrl, chestCtrl, 'chest' )
space.addMain(neckCtrl)
"""
# Constrain to spline proxy, up to the chest...
constraints = []
for src, dest in list(zip( chain, srcChain ))[:chestIndex]:
constraints.append( core.constraints.pointConst( src, dest ) )
constraints.append( core.constraints.orientConst( src, dest ) )
# ... including the chest
src = chain[chestIndex]
dest = srcChain[chestIndex]
# &&& Gotta remove/figure out what is going on here, why can't I just constrain the srcChain entirely to its dup'd chain?
if False: # numChestJoints > 2: # The shoulder control is skipped if there aren't enough joints
constraints.append( core.constraints.pointConst( src, dest ) )
constraints.append( core.constraints.orientConst( src, dest ) )
# ... not including the chest
else:
chestProxy = duplicate(src, po=True)[0]
chestProxy.setParent(chestCtrl)
constraints.append( core.constraints.pointConst( chestProxy, dest ) )
constraints.append( core.constraints.orientConst( chestProxy, dest ) )
hide(chestProxy)
if chestOffsetCtrl:
constraints.append( core.constraints.pointConst( chain[-1], srcChain[-1] ) )
constraints.append( core.constraints.orientConst( chain[-1], srcChain[-1] ) )
#constraints.append( core.constraints.pointConst( neckCtrl, srcChain[-1] ) )
#constraints.append( core.constraints.orientConst( neckCtrl, srcChain[-1] ) )
"""
if numChestJoints > 2: # The shoulder control is skipped if there aren't enough joints
# Make a proxy since we can't constrain with maintainOffset=True if we're making fk too.
proxy = duplicate(srcChain[-2], po=True)[0]
proxy.setParent(neck)
core.dagObj.lockTrans(core.dagObj.lockRot(core.dagObj.lockScale(proxy)))
constraints.append( core.constraints.pointConst( proxy, srcChain[-2] ) )
constraints.append( core.constraints.orientConst( proxy, srcChain[-2] ) )
"""
hide(chain, mainIk)
# Bind joints to the curve
if False: # numChestJoints > 2: # The shoulder control is skipped if there aren't enough joints
skinCluster( crv, base, mid, chest, neck, tsb=True )
else:
skinCluster( crv, base, mid, chest, tsb=True )
chestCtrl = nodeApi.RigController.convert(chestCtrl)
chestCtrl.container = container
chestCtrl.subControl['mid'] = midCtrl
if chestOffsetCtrl:
chestCtrl.subControl['offset'] = chestOffsetCtrl
#if numChestJoints > 2: # The shoulder control is skipped if there aren't enough joints
# chestCtrl.subControl['offset'] = shoulderCtrl
#chestCtrl.subControl['neck'] = neckCtrl
# Setup advanced twist
startAxis = duplicate( start, po=True )[0]
startAxis.rename( 'startAxis' )
startAxis.setParent( base )
core.dagObj.lockTrans(core.dagObj.lockRot(core.dagObj.lockScale(startAxis)))
endAxis = duplicate( start, po=True )[0]
endAxis.rename( 'endAxis' )
endAxis.setParent( chestCtrl )
endAxis.t.set(0, 0, 0)
core.dagObj.lockTrans(core.dagObj.lockRot(core.dagObj.lockScale(endAxis)))
hide(startAxis, endAxis)
mainIk.dTwistControlEnable.set(1)
mainIk.dWorldUpType.set(4)
startAxis.worldMatrix[0] >> mainIk.dWorldUpMatrix
endAxis.worldMatrix[0] >> mainIk.dWorldUpMatrixEnd
hide(startAxis, endAxis)
return chestCtrl, constraints
'''
# For some reason, direct binding doesn't work out, it throws cycle errors
# but it would be good to get it working like this for consistency.
lib.weights.set( crv,
[ [(base.name(), 1.0)],
[(mid.name(), 0.05), (base.name(), 0.95)],
[(mid.name(), 1.0) ],
[(chest.name(), 1.0) ],
[(chest.name(), 0.55), (end.name(), 0.45)],
[(neck.name(), 1.0)],
[(neck.name(), 1.0)] ] )
'''
class SplineChest(MetaControl):
''' Spline control for the chest mass.'''
#ik_ = 'pdil.tool.fossil.rigging.splineChest.buildSplineChest'
ik_ = __name__ + '.' + buildSplineChest.__name__ # Uses strings so reloading development always grabs the latest
ikInput = OrderedDict( [('name', ParamInfo( 'Name', 'Name', ParamInfo.STR, 'Chest')),
('useTrueZero', ParamInfo( 'Use True Zero', 'Use True Zero', ParamInfo.BOOL, False)),
('indexOfRibCage', ParamInfo( 'Base of Rib Cage Index', 'Index of the bottom of the rib cage.', ParamInfo.INT, -1)),
] )
fkArgs = {'translatable': True}
def activate_ik(chestCtrl):
'''
'''
util.alignToMatcher(chestCtrl)
matcher = util.getMatcher(chestCtrl)
endJoint = PyNode( parentConstraint(matcher, q=True, tl=True)[0] )
endBpj = rig.getBPJoint(endJoint)
if chestCtrl.isPrimarySide:
children = [c.real for c in endBpj.proxyChildren if not c.isHelper]
else:
children = [c.realMirror for c in endBpj.proxyChildren if not c.isHelper]
if children:
rot = xform(children[0], q=True, ws=True, ro=True)
pos = xform(children[0], q=True, ws=True, t=True)
# ---
midJnt = chestCtrl.subControl['mid'].listRelatives(type='joint')[0]
skin = listConnections(midJnt, type='skinCluster')
curveShape = skin[0].outputGeometry[0].listConnections(p=True)[0].node()
ikHandle = curveShape.worldSpace.listConnections( type='ikHandle' )[0]
chain = util.getChainFromIk(ikHandle)
boundJoints = util.getConstraineeChain(chain)
if len(boundJoints) % 2 == 1:
#switch_logger.debug('Mid point ODD moved, # bound = {}'.format(len(boundJoints)))
i = int(len(boundJoints) / 2) + 1
xform( chestCtrl.subControl['mid'], ws=True, t=xform(boundJoints[i], q=True, ws=True, t=True) )
else:
i = int(len(boundJoints) / 2)
xform( chestCtrl.subControl['mid'], ws=True, t=xform(boundJoints[i], q=True, ws=True, t=True) )
#switch_logger.debug('Mid point EVEN moved, # bound = {}'.format(len(boundJoints)))
# FK match joints beyond the chest control
if children:
print(rot, pos)
xform(chestCtrl.subControl['offset'], ws=True, ro=rot)
xform(chestCtrl.subControl['offset'], ws=True, t=pos)
"""
# Find all children joints
jointData = []
def getChildrenPositions(jnt, jointData):
children = listRelatives(jnt, type='joint')
for child in children:
bpChild = rig.getBPJoint( child )
if bpChild.card == card:
jointData.append( (
child,
xform(child, q=True, ws=True, ro=True),
xform(child, q=True, ws=True, p=True)
) )
getChildrenPositions(child, jointData)
break
getChildrenPositions(endJoint, jointData)
for j, rot, pos in jointData:
pass
"""
class activator(object):
@staticmethod
def prep(chestCtrl):
matcher = util.getMatcher(chestCtrl)
endJoint = PyNode( parentConstraint(matcher, q=True, tl=True)[0] )
endBpj = rig.getBPJoint(endJoint)
card = endBpj.card
if chestCtrl.isPrimarySide:
children = [c.real for c in endBpj.proxyChildren if not c.isHelper and c.card == card]
else:
children = [c.realMirror for c in endBpj.proxyChildren if not c.isHelper and c.card == card]
midJnt = chestCtrl.subControl['mid'].listRelatives(type='joint')[0]
skin = listConnections(midJnt, type='skinCluster')
curveShape = skin[0].outputGeometry[0].listConnections(p=True)[0].node()
ikHandle = curveShape.worldSpace.listConnections( type='ikHandle' )[0]
chain = util.getChainFromIk(ikHandle)
boundJoints = util.getConstraineeChain(chain)
stomachIndex = int(len(boundJoints) / 2) + 1 if len(boundJoints) % 2 == 1 else int(len(boundJoints) / 2)
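# e.g. 7 bound joints -> index 4 (just past the middle); 6 bound joints -> index 3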
return {
'matcher': matcher,
'extraFk': children,
'stomach': boundJoints[stomachIndex]
}
@staticmethod
def harvest(data):
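# Capture world-space info (via util.worldInfo) for everything prepped above so apply() can run without re-querying the rig.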
values = {
'matcher': util.worldInfo(data['matcher']),
'stomach': util.worldInfo(data['stomach']),
'extraFk': util.worldInfo(data['extraFk'][0]) if data['extraFk'] else None,
}
return values
@staticmethod
def apply(data, values, chestCtrl):
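# Re-apply the captured world info: chest control to the matcher, mid control to the stomach joint, and the offset control to the extra FK child if one exists.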
util.applyWorldInfo(chestCtrl, values['matcher'])
util.applyWorldInfo(chestCtrl.subControl['mid'], values['stomach'])
if values['extraFk']:
util.applyWorldInfo(chestCtrl.subControl['offset'], values['extraFk']) | {
"repo_name": "patcorwin/fossil",
"path": "pdil/tool/fossil/rigging/splineChest.py",
"copies": "1",
"size": "16906",
"license": "bsd-3-clause",
"hash": 6263699941392379000,
"line_mean": 37.8666666667,
"line_max": 175,
"alpha_frac": 0.6336211996,
"autogenerated": false,
"ratio": 3.448796409628723,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45824176092287233,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from collections import OrderedDict
from glue.core import Subset
from glue.core.subset import MaskSubsetState
__all__ = ['SubsetMaskImporter', 'SubsetMaskExporter']
class SubsetMaskImporter(object):
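# Abstract importer: subclasses implement get_filename_and_reader(), returning a filename plus a reader callable that maps that filename to a dict of {label: boolean mask array}.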
def get_filename_and_reader(self):
raise NotImplementedError
def run(self, data_or_subset, data_collection):
filename, reader = self.get_filename_and_reader()
if filename is None:
return
# Read in the masks
masks = reader(filename)
# Make sure shape is unique
shapes = set(mask.shape for mask in masks.values())
if len(shapes) == 0:
raise ValueError("No subset masks were returned")
elif len(shapes) > 1:
raise ValueError("Not all subsets have the same shape")
if list(shapes)[0] != data_or_subset.shape:
raise ValueError("Mask shape {0} does not match data shape {1}".format(list(shapes)[0], data_or_subset.shape))
if isinstance(data_or_subset, Subset):
subset = data_or_subset
if len(masks) != 1:
raise ValueError("Can only read in a single subset when importing into a subset")
mask = list(masks.values())[0]
subset_state = MaskSubsetState(mask, subset.pixel_component_ids)
subset.subset_state = subset_state
else:
data = data_or_subset
for label, mask in masks.items():
subset_state = MaskSubsetState(mask, data.pixel_component_ids)
data_collection.new_subset_group(label=label, subset_state=subset_state)
class SubsetMaskExporter(object):
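# Abstract exporter counterpart: subclasses implement get_filename_and_writer(), returning a filename plus a writer callable taking (filename, {label: mask}).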
def get_filename_and_writer(self):
raise NotImplementedError
def run(self, data_or_subset):
filename, writer = self.get_filename_and_writer()
if filename is None:
return
# Prepare dictionary of masks
masks = OrderedDict()
if isinstance(data_or_subset, Subset):
subset = data_or_subset
masks[subset.label] = subset.to_mask()
else:
data = data_or_subset
if len(data.subsets) == 0:
raise ValueError("Data has no subsets")
for subset in data.subsets:
masks[subset.label] = subset.to_mask()
writer(filename, masks)
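# A minimal sketch (not part of glue) of a concrete importer, assuming masks are stored
# as boolean arrays in an .npz file. The class name, file name, and reader below are
# hypothetical and only illustrate the extension point described above.
import numpy as np

class NpzSubsetMaskImporter(SubsetMaskImporter):

    def get_filename_and_reader(self):
        def reader(filename):
            # Load every array in the archive and coerce it to a boolean mask.
            f = np.load(filename)
            return OrderedDict((label, f[label].astype(bool)) for label in f.files)
        return 'masks.npz', reader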
| {
"repo_name": "stscieisenhamer/glue",
"path": "glue/io/subset_mask.py",
"copies": "3",
"size": "2415",
"license": "bsd-3-clause",
"hash": 8630933212774345000,
"line_mean": 25.8333333333,
"line_max": 122,
"alpha_frac": 0.6082815735,
"autogenerated": false,
"ratio": 4.281914893617022,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0003285568665516205,
"num_lines": 90
} |
from __future__ import absolute_import, division, print_function
from collections import OrderedDict
from pymel.core import delete, dt, group, hide, ikHandle, orientConstraint, parentConstraint, poleVectorConstraint, pointConstraint, PyNode, xform
from ....add import simpleName
from .... import core
from .... import lib
from .... import nodeApi
from .. import controllerShape
from ..cardRigging import MetaControl, ParamInfo
from .. import space
from . import _util as util
from .. import rig
from .. import node
@util.adds('stretch', 'bend', 'length')
@util.defaultspec( {'shape': 'box', 'size': 10, 'color': 'green 0.22' },
pv={'shape': 'sphere', 'size': 5, 'color': 'green 0.22' },
socket={'shape': 'sphere', 'size': 5, 'color': 'green 0.22', 'visGroup': 'socket' } )
def buildDogleg(hipJoint, end, pvLen=None, name='Dogleg', endOrientType=util.EndOrient.TRUE_ZERO_FOOT, groupName='', controlSpec={}):
'''
.. todo::
* Specify toe joint instead to remove ambiguity in case of twist joints.
* For some reason, twist must sometimes be introduced because some flipping
occurs. For some reason the poleVector doesn't come in straight on.
* Need to determine if a 180 twist is needed as the minotaur did.
* Need to figure out the best way to constrain the last joint to the controller
'''
boundChain = util.getChain(hipJoint, end)
container = group(n=name + '_dogHindleg', em=True, p=node.mainGroup())
# &&& I think I want to turn this into the container for all extra stuff related to a given control
chainGrp = group( p=container, n=name + "_ikChain", em=True )
parentConstraint( hipJoint.getParent(), chainGrp, mo=True )
# Make the control to translate/offset the limb's socket.
socketOffset = controllerShape.build( name + '_socket', controlSpec['socket'], type=controllerShape.ControlType.TRANSLATE )
core.dagObj.lockScale(socketOffset)
core.dagObj.lockRot(socketOffset)
core.dagObj.moveTo( socketOffset, hipJoint )
socketZero = core.dagObj.zero(socketOffset)
socketZero.setParent( chainGrp )
footCtrl = controllerShape.build( name, controlSpec['main'], type=controllerShape.ControlType.IK)
core.dagObj.lockScale(footCtrl)
footCtrl.addAttr( 'bend', at='double', k=True )
core.dagObj.moveTo( footCtrl, end )
if endOrientType == util.EndOrient.TRUE_ZERO:
util.trueZeroSetup(end, footCtrl)
elif endOrientType == util.EndOrient.TRUE_ZERO_FOOT:
util.trueZeroFloorPlane(end, footCtrl)
elif endOrientType == util.EndOrient.JOINT:
core.dagObj.matchTo(footCtrl, end)
footCtrl.rx.set( util.shortestAxis(footCtrl.rx.get()) )
footCtrl.ry.set( util.shortestAxis(footCtrl.ry.get()) )
footCtrl.rz.set( util.shortestAxis(footCtrl.rz.get()) )
core.dagObj.zero(footCtrl)
elif endOrientType == util.EndOrient.WORLD:
# Do nothing, it's built world oriented
pass
util.createMatcher(footCtrl, end).setParent(container)
# Make the main ik chain which gives overall compression
masterChain = util.dupChain(hipJoint, end)
masterChain[0].rename( simpleName(hipJoint, '{0}_OverallCompression') )
mainIk = ikHandle( sol='ikRPsolver', sj=masterChain[0], ee=masterChain[-1] )[0]
PyNode('ikSpringSolver').message >> mainIk.ikSolver
mainIk.rename('mainIk')
hide(mainIk)
springFixup = group(em=True, n='SprinkIkFix')
springFixup.inheritsTransform.set(False)
springFixup.inheritsTransform.lock()
springFixup.setParent( socketOffset )
pointConstraint( socketOffset, springFixup )
masterChain[0].setParent( springFixup )
#pointConstraint( socketOffset, hipJoint )
# Create the polevector. This needs to happen first so things don't flip out later
out = util.calcOutVector(masterChain[0], masterChain[1], masterChain[-1])
if not pvLen or pvLen < 0:
pvLen = util.chainLength(masterChain[1:]) * 0.5
pvPos = out * pvLen + dt.Vector(xform(boundChain[1], q=True, ws=True, t=True))
pvCtrl = controllerShape.build( name + '_pv', controlSpec['pv'], type=controllerShape.ControlType.POLEVECTOR )
core.dagObj.lockScale(pvCtrl)
core.dagObj.lockRot(pvCtrl)
xform(pvCtrl, ws=True, t=pvPos)
poleVectorConstraint( pvCtrl, mainIk )
# Verify the knees are in the same place
delta = boundChain[1].getTranslation('world') - masterChain[1].getTranslation('world')
if delta.length() > 0.1:
mainIk.twist.set(180)
# Make sub IKs so the chain can be offset
offsetChain = util.dupChain(hipJoint, end)
hide(offsetChain[0])
offsetChain[0].rename( 'OffsetChain' )
offsetChain[0].setParent(container)
controllerShape.connectingLine(pvCtrl, offsetChain[1] )
constraints = util.constrainAtoB( util.getChain(hipJoint, end), offsetChain, mo=False )
pointConstraint( masterChain[0], offsetChain[0] )
ankleIk = ikHandle( sol='ikRPsolver', sj=offsetChain[0], ee=offsetChain[-2])[0]
offsetIk = ikHandle( sol='ikRPsolver', sj=offsetChain[-2], ee=offsetChain[-1])[0]
offsetIk.rename('metatarsusIk')
offsetControl = group(em=True, n='OffsetBend')
offsetContainer = group(offsetControl, n='OffsetSpace')
offsetContainer.setParent(footCtrl)
# Setup the offsetContainer so it is properly aligned to bend on z
offsetContainer.setParent( masterChain[-1] )
offsetContainer.t.set(0, 0, 0)
#temp = aimConstraint( pvCtrl, offsetContainer, aim=[1, 0, 0], wut='object', wuo=hipJoint, u=[0, 1, 0])
#delete( temp )
'''
NEED TO CHANGE THE ORIENTATION
Must perfectly align with ankle segment so the offset ikhandle can translate
according to how much things are scaled
'''
lib.anim.orientJoint(offsetContainer, boundChain[-2], upTarget=boundChain[-3], aim='y', up='x')
#mimic old way lib.anim.orientJoint(offsetContainer, pvCtrl, upTarget=hipJoint, aim='x', up='y')
#lib.anim.orientJoint(offsetContainer, pvCtrl, upTarget=hipJoint, aim='x', up='y')
offsetControl.t.set(0, 0, 0)
offsetControl.t.lock()
offsetControl.r.set(0, 0, 0)
footCtrl.bend >> offsetControl.rz
'''
This is really dumb.
Sometimes maya will rotate everything by 180 but I'm not sure how to
calculate the proper offset, which normally results in one axis being off
by 360, so account for that too.
'''
temp = orientConstraint( footCtrl, offsetChain[-1], mo=True)
if not core.math.isClose( offsetChain[-1].r.get(), [0, 0, 0] ):
badVals = offsetChain[-1].r.get()
delete(temp)
offsetChain[-1].r.set( -badVals )
temp = orientConstraint( footCtrl, offsetChain[-1], mo=True)
for a in 'xyz':
val = offsetChain[-1].attr('r' + a).get()
if abs(val - 360) < 0.00001:
attr = temp.attr( 'offset' + a.upper() )
attr.set( attr.get() - 360 )
elif abs(val + 360) < 0.00001:
attr = temp.attr( 'offset' + a.upper() )
attr.set( attr.get() + 360 )
# Hopefully the end of dumbness
ankleIk.setParent( offsetControl )
# Adjust the offset ikHandle according to how long the final bone is.
if masterChain[-1].tx.get() > 0:
masterChain[-1].tx >> ankleIk.ty
else:
core.math.multiply(masterChain[-1].tx, -1.0) >> ankleIk.ty
ankleIk.tx.lock()
ankleIk.tz.lock()
#ankleIk.t.lock()
mainIk.setParent( footCtrl )
offsetIk.setParent( footCtrl )
core.dagObj.zero(footCtrl).setParent( container )
hide(masterChain[0], ankleIk, offsetIk)
poleVectorConstraint( pvCtrl, ankleIk )
poleVectorConstraint( pvCtrl, offsetIk )
# Adding the pv constraint might require a counter rotation of the offsetIk
counterTwist = offsetChain[-2].rx.get() * (1.0 if offsetChain[-2].tx.get() < 0 else -1.0)
offsetIk.twist.set( counterTwist )
core.dagObj.zero(pvCtrl).setParent( container )
# Make stretchy ik, but the secondary chain needs the stretch hooked up too.
rig.makeStretchyNonSpline(footCtrl, mainIk)
#for src, dest in zip( util.getChain(masterChain, masterEnd)[1:], util.getChain( hipJoint, getDepth(hipJoint, 4) )[1:] ):
# src.tx >> dest.tx
for src, dest in zip( masterChain[1:], offsetChain[1:] ):
src.tx >> dest.tx
footCtrl = nodeApi.RigController.convert(footCtrl)
footCtrl.container = container
footCtrl.subControl['socket'] = socketOffset
footCtrl.subControl['pv'] = pvCtrl
# Add default spaces
space.addMain( pvCtrl )
space.add( pvCtrl, footCtrl )
space.add( pvCtrl, footCtrl, mode=space.Mode.TRANSLATE)
if hipJoint.getParent():
space.add( pvCtrl, hipJoint.getParent())
space.addMain( footCtrl )
space.add( footCtrl, hipJoint.getParent() )
return footCtrl, constraints
class DogHindleg(MetaControl):
''' 4 joint dog hindleg. '''
ik_ = 'pdil.tool.fossil.rigging.dogHindLeg.buildDogleg'
fkArgs = {'translatable': True}
ikInput = OrderedDict( [
('name', ParamInfo( 'Name', 'Name', ParamInfo.STR, 'Leg')),
('pvLen', ParamInfo('PV Length', 'How far the pole vector should be from the chain', ParamInfo.FLOAT, default=0) ),
('endOrientType', ParamInfo('Control Orient', 'How to orient the last control', ParamInfo.ENUM, default=util.EndOrient.TRUE_ZERO_FOOT, enum=util.EndOrient.asChoices())),
] )
def activateIk(ctrl):
# Get the last ik chunk but expand it to include the rest of the limb joints
for ik in ctrl.listRelatives(type='ikHandle'):
if not ik.name().count( 'mainIk' ):
break
else:
raise Exception('Unable to determine IK handle on {0} to match'.format(ctrl))
chain = util.getChainFromIk(ik)
chain.insert( 0, chain[0].getParent() )
chain.insert( 0, chain[0].getParent() )
bound = util.getConstraineeChain(chain)
# Move the main control to the end point
#xform(ctrl, ws=True, t=xform(bound[-1], q=True, ws=True, t=True) )
util.alignToMatcher(ctrl)
# Place the pole vector away
out = rig.calcOutVector(bound[0], bound[1], bound[-2])
length = abs(sum( [b.tx.get() for b in bound[1:]] ))
out *= length
pvPos = xform( bound[1], q=True, ws=True, t=True ) + out
xform( ctrl.subControl['pv'], ws=True, t=pvPos )
# Figure out the bend, (via trial and error at the moment)
def setBend():
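# Trial and error: apply the measured angle to the bend attribute, then flip the sign if that moved the chain joint farther from the bound ankle.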
angle, axis = util.angleBetween( bound[-2], bound[-1], chain[-2] )
current = ctrl.bend.get()
''' This is an attempt to look at the axis to determine what direction to bend
if abs(axis[0]) > abs(axis[1]) and abs(axis[0]) > abs(axis[2]):
signAxis = axis[0]
elif abs(axis[1]) > abs(axis[0]) and abs(axis[1]) > abs(axis[2]):
signAxis = axis[1]
elif abs(axis[2]) > abs(axis[0]) and abs(axis[2]) > abs(axis[1]):
signAxis = axis[2]
'''
d = core.dagObj.distanceBetween(bound[-2], chain[-2])
ctrl.bend.set( current + angle )
if core.dagObj.distanceBetween(bound[-2], chain[-2]) > d:
ctrl.bend.set( current - angle )
setBend()
# Try to correct for errors a few times because the initial bend might
# prevent the foot from being placed all the way at the end.
# Can't try forever in case the FK is off plane.
'''
The *right* way to do this. Get the angle between
cross the 2 vectors for the out vector
cross the out vector with the original vector for the "right angle" vector
Now dot that with the 2nd vector (and possibly get angle?) if it's less than 90 rotate one direction
'''
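# Hypothetical sketch of the cross/dot test described above (not called by the tool);
# the name and signature are illustrative only. Both arguments are dt.Vector instances.
def bendDirectionSketch(original, second):
    out = original.cross(second)          # out vector, perpendicular to both
    rightAngle = out.cross(original)      # 90 degrees from `original`, in the shared plane
    # A positive dot product means `second` is within 90 degrees of the right-angle
    # vector, so rotate one direction; otherwise rotate the other way.
    return 1 if rightAngle.dot(second) > 0 else -1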
if core.dagObj.distanceBetween(bound[-2], chain[-2]) > 0.1:
setBend()
if core.dagObj.distanceBetween(bound[-2], chain[-2]) > 0.1:
setBend()
if core.dagObj.distanceBetween(bound[-2], chain[-2]) > 0.1:
setBend()
class activator(object):
@staticmethod
def prep(ctrl):
# Get the last ik chunk but expand it to include the rest of the limb joints
for ik in ctrl.listRelatives(type='ikHandle'):
if not ik.name().count( 'mainIk' ):
break
else:
raise Exception('Unable to determine IK handle on {0} to match'.format(ctrl))
chain = util.getChainFromIk(ik)
chain.insert( 0, chain[0].getParent() )
chain.insert( 0, chain[0].getParent() )
bound = util.getConstraineeChain(chain)
return {
'matcher': util.getMatcher(ctrl),
'hip': bound[0],
'knee': bound[1],
'ankle': bound[2],
'ball': bound[3],
'chain_2': chain[-2],
}
@staticmethod
def harvest(data):
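# Record world-space info for the matcher and each leg joint, plus the summed bone lengths used to push the pole vector out.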
return {
'matcher': util.worldInfo( data['matcher']),
'hip': util.worldInfo( data['hip']),
'knee': util.worldInfo( data['knee']),
'ankle': util.worldInfo( data['ankle']),
'ball': util.worldInfo( data['ball']),
'length': abs(sum( [b.tx.get() for b in (data['knee'], data['ankle'], data['ball'])] )),
}
@staticmethod
def apply(data, values, ctrl):
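# Re-apply: place the control from the matcher info, push the pole vector out from the knee along the recomputed out vector, then dial in the bend by trial and error below.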
out = rig.calcOutVector(dt.Vector(values['hip'][0]), dt.Vector(values['knee'][0]), dt.Vector(values['ball'][0]))
out *= values['length']
pvPos = values['knee'][0] + out
util.applyWorldInfo(ctrl, values['matcher'])
xform( ctrl.subControl['pv'], ws=True, t=pvPos )
# Figure out the bend, (via trial and error at the moment)
def setBend():
angle, axis = util.angleBetween( data['ankle'], data['ball'], data['chain_2'] )
current = ctrl.bend.get()
''' This is an attempt to look at the axis to determine what direction to bend
if abs(axis[0]) > abs(axis[1]) and abs(axis[0]) > abs(axis[2]):
signAxis = axis[0]
elif abs(axis[1]) > abs(axis[0]) and abs(axis[1]) > abs(axis[2]):
signAxis = axis[1]
elif abs(axis[2]) > abs(axis[0]) and abs(axis[2]) > abs(axis[1]):
signAxis = axis[2]
'''
d = core.dagObj.distanceBetween(data['ankle'], data['chain_2'])
ctrl.bend.set( current + angle )
if core.dagObj.distanceBetween(data['ankle'], data['chain_2']) > d:
ctrl.bend.set( current - angle )
setBend()
if core.dagObj.distanceBetween(data['ankle'], data['chain_2']) > 0.1:
setBend()
if core.dagObj.distanceBetween(data['ankle'], data['chain_2']) > 0.1:
setBend()
if core.dagObj.distanceBetween(data['ankle'], data['chain_2']) > 0.1:
setBend() | {
"repo_name": "patcorwin/fossil",
"path": "pdil/tool/fossil/rigging/dogHindLeg.py",
"copies": "1",
"size": "15149",
"license": "bsd-3-clause",
"hash": 6532037874757002000,
"line_mean": 37.8461538462,
"line_max": 177,
"alpha_frac": 0.6207010364,
"autogenerated": false,
"ratio": 3.4453036161018877,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.939473000451742,
"avg_score": 0.034254929596893656,
"num_lines": 390
} |
from __future__ import absolute_import, division, print_function
from collections import OrderedDict
import pytest
import numpy as np
from matplotlib.axes import Axes
from mock import MagicMock, patch
from numpy.testing import assert_array_equal
from glue.core.tests.test_state import clone
from glue.core.tests.util import simple_session
from glue.core.subset import SubsetState
from glue.core import Data
from glue import custom_viewer
from glue.app.qt import GlueApplication
from glue.app.qt.tests.test_application import check_clone_app
from ..custom_viewer import (FormElement, NumberElement,
ChoiceElement, CustomViewer,
CustomSubsetState, AttributeInfo,
FloatElement, TextBoxElement, SettingsOracle,
MissingSettingError, FrozenSettings)
def _make_widget(viewer):
s = simple_session()
return viewer._widget_cls(s)
viewer = custom_viewer('Testing Custom Viewer',
a=(0, 100),
b='att',
c='att(x)',
d=True,
e=False,
f=['a', 'b', 'c'],
g=OrderedDict(a=1, b=2, c=3),
h=64
)
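# Module-level mocks record how the viewer hooks below are called. Each keyword above is
# auto-converted into a UI element by type (numeric ranges, 'att' attribute selectors,
# bool checkboxes, list/dict choices, bare numbers); TestFormElements exercises this directly.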
setup = MagicMock()
settings_changed = MagicMock()
plot_subset = MagicMock()
plot_data = MagicMock()
make_selector = MagicMock()
make_selector.return_value = MagicMock(spec=SubsetState)
make_selector().copy.return_value = MagicMock(spec=SubsetState)
make_selector().copy().to_mask.return_value = np.array([False])
@viewer.setup
def _setup(axes):
setup(axes)
@viewer.plot_data
def _plot_data(axes, a, b, g, h):
plot_data(axes=axes, a=a, b=b, g=g, h=h)
return []
@viewer.plot_subset
def _plot_subset(b, c, d, e, f, style):
plot_subset(b=b, c=c, d=d, e=e, f=f, style=style)
return []
@viewer.settings_changed
def _settings_changed(state):
settings_changed(state=state)
@viewer.make_selector
def _make_selector(roi, c):
make_selector(roi=roi, c=c)
return SubsetState()
def test_custom_classes_dont_share_methods():
"""Regression test for #479"""
a = custom_viewer('a')
b = custom_viewer('b')
assert a._custom_functions is not b._custom_functions
class ViewerSubclass(CustomViewer):
a = (0, 100)
b = 'att'
c = 'att(x)'
d = True
e = False
f = ['a', 'b', 'c']
g = OrderedDict(a=1, b=2, c=3)
h = 64
def setup(self, axes):
return setup(axes)
def plot_data(self, axes, a, b, g, h):
return plot_data(axes=axes, a=a, b=b, g=g, h=h)
def plot_subset(self, b, c, d, e, f, style):
return plot_subset(b=b, c=c, d=d, e=e, f=f, style=style)
def settings_changed(self, state):
return settings_changed(state=state)
def make_selector(self, roi, c):
return make_selector(roi=roi, c=c)
class TestCustomViewer(object):
def setup_class(self):
self.viewer = viewer
def setup_method(self, method):
setup.reset_mock()
settings_changed.reset_mock()
plot_subset.reset_mock()
plot_data.reset_mock()
make_selector.reset_mock()
self.data = Data(x=[1, 2, 3], y=[2, 3, 4])
self.session = simple_session()
self.dc = self.session.data_collection
self.dc.append(self.data)
def teardown_method(self, method):
if hasattr(self, 'w'):
self.w.unregister(self.session.hub)
def build(self):
w = self.viewer._widget_cls(self.session)
w.register_to_hub(self.session.hub)
self.w = w
return w
def test_setup_called_on_init(self):
ct = setup.call_count
self.build()
assert setup.call_count == ct + 1
def test_separate_widgets_have_separate_state(self):
w1 = self.build()
w2 = self.build()
assert w1._coordinator is not w2._coordinator
assert w1._coordinator.state is not w2._coordinator.state
def test_plot_data(self):
w = self.build()
w.add_data(self.data)
a, k = plot_data.call_args
assert isinstance(k['axes'], Axes)
assert set(k.keys()) == set(('axes', 'a', 'b', 'g', 'h'))
assert k['a'] == 50
assert k['g'] == 1
assert k['h'] == 64
def test_plot_subset(self):
w = self.build()
w.add_data(self.data)
self.dc.new_subset_group(subset_state=self.data.id['x'] > 2)
a, k = plot_subset.call_args
assert set(k.keys()) == set(('b', 'c', 'd', 'e', 'f', 'style'))
assert_array_equal(k['b'].values, [3])
assert_array_equal(k['c'].values, [3])
assert k['d']
assert not k['e']
assert k['f'] == 'a'
def test_make_selector(self):
w = self.build()
roi = MagicMock()
w.client.apply_roi(roi)
a, k = make_selector.call_args
assert set(k.keys()) == set(('roi', 'c'))
assert k['roi'] is roi
def test_settings_change(self):
w = self.build()
ct = settings_changed.call_count
w._coordinator._settings['d'].ui.setChecked(False)
assert settings_changed.call_count == ct + 1
a, k = settings_changed.call_args
assert 'state' in k
def test_register(self):
with patch('glue.viewers.custom.qt.FormElement.register_to_hub') as r:
w = self.build()
assert r.call_count > 0
def test_component(self):
w = self.build()
w.add_data(self.data)
assert_array_equal(w._coordinator.value('b', layer=self.data).values,
[1, 2, 3])
def test_component_autoupdate(self):
w = self.build()
w.add_data(self.data)
assert w._coordinator._settings['b'].ui.count() == 2
self.data.add_component([10, 20, 30], label='c')
assert w._coordinator._settings['b'].ui.count() == 3
def test_settings_changed_called_on_init(self):
w = self.build()
assert settings_changed.call_count == 1
def test_selections_enabled(self):
w = self.build()
assert w._coordinator.selections_enabled
assert 'select:rectangle' in w.toolbar.tools
assert 'select:polygon' in w.toolbar.tools
def test_state_save():
app = GlueApplication()
w = app.new_data_viewer(viewer._widget_cls)
check_clone_app(app)
def test_state_save_with_data_layers():
app = GlueApplication()
dc = app.data_collection
d = Data(x=[1, 2, 3], label='test')
dc.append(d)
w = app.new_data_viewer(viewer._widget_cls)
w.add_data(d)
check_clone_app(app)
class TestCustomSelectMethod(object):
def setup_class(self):
self.viewer = custom_viewer('CustomSelectViewer',
x='att(x)', flip=False)
@self.viewer.select
def select(roi, x, flip):
if flip:
return x <= 1
return x > 1
def setup_method(self, method):
self.data = Data(x=[1, 2, 3], y=[2, 3, 4])
self.session = simple_session()
self.dc = self.session.data_collection
self.dc.append(self.data)
def build(self):
return self.viewer._widget_cls(self.session)
def test_state(self):
w = self.build()
v = w._coordinator
roi = MagicMock()
s = CustomSubsetState(type(v), roi, v.settings())
assert_array_equal(s.to_mask(self.data), [False, True, True])
def test_state_view(self):
w = self.build()
v = w._coordinator
roi = MagicMock()
s = CustomSubsetState(type(v), roi, v.settings())
assert_array_equal(s.to_mask(self.data, view=slice(None, None, 2)),
[False, True])
def test_settings_frozen_at_creation(self):
w = self.build()
v = w._coordinator
roi = MagicMock()
s = CustomSubsetState(type(v), roi, v.settings())
w.flip = True
assert_array_equal(s.to_mask(self.data), [False, True, True])
def test_save_load(self):
w = self.build()
v = w._coordinator
roi = None
s = CustomSubsetState(type(v), roi, v.settings())
s2 = clone(s)
assert_array_equal(s2.to_mask(self.data), [False, True, True])
class TestCustomViewerSubclassForm(TestCustomViewer):
def setup_class(self):
self.viewer = ViewerSubclass
class TestFormElements(object):
def test_number_default_value(self):
e = FormElement.auto((0, 100, 30))
assert e.value() == 30
def test_number_float(self):
e = FormElement.auto((0.0, 1.0, 0.3))
assert e.value() == 0.3
def test_number_list(self):
e = FormElement.auto([0, 10])
assert isinstance(e, NumberElement)
def test_choice_list(self):
e = FormElement.auto(['a', 'b'])
assert isinstance(e, ChoiceElement)
def test_choice_tuple(self):
e = FormElement.auto(('a', 'b'))
assert isinstance(e, ChoiceElement)
def test_float(self):
e = FormElement.auto(1.2)
assert isinstance(e, FloatElement)
e = FormElement.auto(2)
assert isinstance(e, FloatElement)
assert e.value() == 2
def test_textbox(self):
e = FormElement.auto('_str')
assert isinstance(e, TextBoxElement)
assert e.value() == 'str'
def test_recognizes_subsubclasses(self):
class SubClassFormElement(TextBoxElement):
@classmethod
def recognizes(cls, params):
return params == 'specific_class'
e = FormElement.auto('specific_class')
assert isinstance(e, SubClassFormElement)
def test_unrecognized(self):
with pytest.raises(ValueError):
e = FormElement.auto(None)
class TestAttributeInfo(object):
def setup_method(self, method):
d = Data(x=[1, 2, 3, 4, 5], c=['a', 'b', 'a', 'a', 'b'], label='test')
s = d.new_subset()
s.subset_state = d.id['x'] > 2
self.d = d
self.s = s
def test_numerical(self):
v = AttributeInfo.from_layer(self.d, self.d.id['x'])
assert_array_equal(v, [1, 2, 3, 4, 5])
assert v.id == self.d.id['x']
assert v.categories is None
def test_categorical(self):
v = AttributeInfo.from_layer(self.d, self.d.id['c'])
assert_array_equal(v, [0, 1, 0, 0, 1])
assert v.id == self.d.id['c']
assert_array_equal(v.categories, ['a', 'b'])
def test_subset(self):
v = AttributeInfo.from_layer(self.s, self.d.id['x'])
assert_array_equal(v, [3, 4, 5])
assert v.id == self.d.id['x']
assert v.categories is None
def test_has_component(self):
v = AttributeInfo.from_layer(self.s, self.d.id['x'])
comp = self.s.data.get_component(self.d.id['x'])
assert v._component == comp
class TestSettingsOracle(object):
def test_oracle_raises_original_error(self):
class BadFormElement(TextBoxElement):
def value(self, layer=None, view=None):
raise AttributeError('Inner Error')
oracle = SettingsOracle({'bad_form': BadFormElement('str("text")')})
try:
oracle('bad_form')
assert False
except AttributeError as err:
assert 'Inner Error' in err.args
def test_oracle_raises_missing(self):
oracle = SettingsOracle({'Form': TextBoxElement('_text')})
with pytest.raises(MissingSettingError):
oracle('missing')
def test_frozen_oracle_raises_missing(self):
oracle = FrozenSettings()
with pytest.raises(MissingSettingError):
oracle.value('missing')
def test_load_reserved_words(self):
_self = MagicMock()
layer = MagicMock()
style = layer.style
extra = MagicMock()
oracle = SettingsOracle({}, _self=_self,
layer=layer,
extra=extra)
assert oracle('self') == _self
assert oracle('layer') == layer
assert oracle('style') == style
assert oracle('extra') == extra
def test_setting_names(self):
oracle = SettingsOracle({'Form': TextBoxElement('_text')})
assert sorted(oracle.setting_names()) == sorted(['style', 'layer', 'Form'])
def test_raises_if_overlapping_reserved_words(self):
with pytest.raises(AssertionError):
SettingsOracle({'self': TextBoxElement('_text')})
def test_two_custom_viewer_classes():
class MyWidget1(CustomViewer):
text_box1_Widget1 = '_Hello'
def setup(self, text_box1_Widget1):
pass
class MyWidget2(CustomViewer):
text_box1_Widget2 = '_Hello'
text_box2_Widget2 = '_world'
def setup(self, text_box1_Widget2, text_box2_Widget2):
pass
app = GlueApplication()
dc = app.data_collection
d = Data(x=[1, 2, 3], label='test')
dc.append(d)
app.new_data_viewer(MyWidget1._widget_cls)
app.new_data_viewer(MyWidget2._widget_cls)
| {
"repo_name": "saimn/glue",
"path": "glue/viewers/custom/qt/tests/test_custom_viewer.py",
"copies": "1",
"size": "13164",
"license": "bsd-3-clause",
"hash": -592312872880399200,
"line_mean": 27.2489270386,
"line_max": 83,
"alpha_frac": 0.5775600122,
"autogenerated": false,
"ratio": 3.533959731543624,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46115197437436245,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from collections import OrderedDict
import uuid
import numpy as np
import pandas as pd
from glue.external import six
from glue.core.message import (DataUpdateMessage, DataRemoveComponentMessage,
DataAddComponentMessage, NumericalDataChangedMessage,
SubsetCreateMessage, ComponentsChangedMessage,
ComponentReplacedMessage, DataReorderComponentMessage)
from glue.core.decorators import clear_cache
from glue.core.util import split_component_view
from glue.core.hub import Hub
from glue.core.subset import Subset, SubsetState
from glue.core.component_id import ComponentIDList
from glue.core.component_link import ComponentLink, CoordinateComponentLink
from glue.core.exceptions import IncompatibleAttribute
from glue.core.visual import VisualAttributes
from glue.core.coordinates import Coordinates
from glue.core.contracts import contract
from glue.config import settings
from glue.utils import view_shape
# Note: leave all the following imports for component and component_id since
# they are here for backward-compatibility (the code used to live in this
# file)
from glue.core.component import Component, CoordinateComponent, DerivedComponent
from glue.core.component_id import ComponentID, ComponentIDDict, PixelComponentID
__all__ = ['Data']
class Data(object):
"""The basic data container in Glue.
The data object stores data as a collection of
:class:`~glue.core.component.Component` objects. Each component stored in a
dataset must have the same shape.
Catalog data sets are stored such that each column is a distinct
1-dimensional :class:`~glue.core.component.Component`.
There are several ways to extract the actual numerical data stored in a
:class:`~glue.core.data.Data` object::
data = Data(x=[1, 2, 3], label='data')
xid = data.id['x']
data[xid]
data.get_component(xid).data
data['x'] # if 'x' is a unique component name
Likewise, datasets support :ref:`fancy indexing <numpy:basics.indexing>`::
data[xid, 0:2]
data[xid, [True, False, True]]
See also: :ref:`data_tutorial`
"""
def __init__(self, label="", coords=None, **kwargs):
"""
:param label: label for data
:type label: str
Extra array-like keywords are extracted into components
"""
# Coordinate conversion object
self.coords = coords or Coordinates()
self._shape = ()
# Components
self._components = OrderedDict()
self._pixel_component_ids = ComponentIDList()
self._world_component_ids = ComponentIDList()
self.id = ComponentIDDict(self)
# Metadata
self.meta = OrderedDict()
# Subsets of the data
self._subsets = []
# Hub that the data is attached to
self.hub = None
self.style = VisualAttributes(parent=self)
self._coordinate_links = None
self.data = self
self.label = label
self.edit_subset = None
for lbl, data in sorted(kwargs.items()):
self.add_component(data, lbl)
self._key_joins = {}
# To avoid circular references when saving objects with references to
# the data, we make sure that all Data objects have a UUID that can
# uniquely identify them.
self.uuid = str(uuid.uuid4())
@property
def subsets(self):
"""
Tuple of subsets attached to this dataset
"""
return tuple(self._subsets)
@property
def ndim(self):
"""
Dimensionality of the dataset
"""
return len(self.shape)
@property
def shape(self):
"""
Tuple of array dimensions, like :attr:`numpy.ndarray.shape`
"""
return self._shape
@property
def label(self):
""" Convenience access to data set's label """
return self._label
@label.setter
def label(self, value):
""" Set the label to value
"""
self._label = value
self.broadcast(attribute='label')
@property
def size(self):
"""
Total number of elements in the dataset.
"""
return np.product(self.shape)
@contract(component=Component)
def _check_can_add(self, component):
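# A derived component may only be added if it was built for this dataset; other components
# are accepted freely while the data is empty or holds only scalar components, after which
# their shape must match self.shape.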
if isinstance(component, DerivedComponent):
return component._data is self
else:
if len(self._components) == 0:
return True
else:
if all(comp.shape == () for comp in self._components.values()):
return True
else:
return component.shape == self.shape
@contract(cid=ComponentID, returns=np.dtype)
def dtype(self, cid):
"""Lookup the dtype for the data associated with a ComponentID"""
# grab a small piece of data
ind = tuple([slice(0, 1)] * self.ndim)
arr = self[cid, ind]
return arr.dtype
@contract(component_id=ComponentID)
def remove_component(self, component_id):
""" Remove a component from a data set
:param component_id: the component to remove
:type component_id: :class:`~glue.core.component_id.ComponentID`
"""
if component_id in self._components:
self._components.pop(component_id)
if self.hub:
msg = DataRemoveComponentMessage(self, component_id)
self.hub.broadcast(msg)
msg = ComponentsChangedMessage(self)
self.hub.broadcast(msg)
@contract(other='isinstance(Data)',
cid='cid_like',
cid_other='cid_like')
def join_on_key(self, other, cid, cid_other):
"""
Create an *element* mapping to another dataset, by joining on values of
ComponentIDs in both datasets.
This join allows any subsets defined on `other` to be propagated to
self. The different ways to call this method are described in the
**Examples** section below.
Parameters
----------
other : :class:`~glue.core.data.Data`
Data object to join with
cid : str or :class:`~glue.core.component_id.ComponentID` or iterable
Component(s) in this dataset to use as a key
cid_other : str or :class:`~glue.core.component_id.ComponentID` or iterable
Component(s) in the other dataset to use as a key
Examples
--------
There are several ways to use this function, depending on how many
components are passed to ``cid`` and ``cid_other``.
**Joining on single components**
First, one can specify a single component ID for both ``cid`` and
``cid_other``: this is the standard mode, and joins one component from
one dataset to the other:
>>> d1 = Data(x=[1, 2, 3, 4, 5], k1=[0, 0, 1, 1, 2], label='d1')
>>> d2 = Data(y=[2, 4, 5, 8, 4], k2=[1, 3, 1, 2, 3], label='d2')
>>> d2.join_on_key(d1, 'k2', 'k1')
Selecting all values in ``d1`` where x is greater than 2 returns
the last three items as expected:
>>> s = d1.new_subset()
>>> s.subset_state = d1.id['x'] > 2
>>> s.to_mask()
array([False, False, True, True, True], dtype=bool)
The linking was done between k1 and k2, and the values of
k1 for the last three items are 1 and 2 - this means that the
first, third, and fourth item in ``d2`` will then get selected,
since k2 has a value of either 1 or 2 for these items.
>>> s = d2.new_subset()
>>> s.subset_state = d1.id['x'] > 2
>>> s.to_mask()
array([ True, False, True, True, False], dtype=bool)
**Joining on multiple components**
.. note:: This mode is currently slow, and will be optimized
significantly in future.
Next, one can specify several components for each dataset: in this
case, the number of components given should match for both datasets.
This causes items in both datasets to be linked when (and only when)
the set of keys match between the two datasets:
>>> d1 = Data(x=[1, 2, 3, 5, 5],
... y=[0, 0, 1, 1, 2], label='d1')
>>> d2 = Data(a=[2, 5, 5, 8, 4],
... b=[1, 3, 2, 2, 3], label='d2')
>>> d2.join_on_key(d1, ('a', 'b'), ('x', 'y'))
Selecting all items in ``d1`` where x (a component of ``d1``) is 5 works
as expected and selects the last two items::
>>> s = d1.new_subset()
>>> s.subset_state = d1.id['x'] == 5
>>> s.to_mask()
array([False, False, False, True, True], dtype=bool)
If we apply this selection to ``d2``, only items where a is 5
and b is 2 will be selected:
>>> s = d2.new_subset()
>>> s.subset_state = d1.id['x'] == 5
>>> s.to_mask()
array([False, False, True, False, False], dtype=bool)
and in particular, the second item (where a is 5 and b is 3) is not
selected.
**One-to-many and many-to-one joining**
Finally, you can specify one component in one dataset and multiple ones
in the other. In the case where one component is specified for this
dataset and multiple ones for the other dataset, then when an item
is selected in the other dataset, it will cause any item in the present
dataset which matches any of the keys in the other data to be selected:
>>> d1 = Data(x=[1, 2, 3], label='d1')
>>> d2 = Data(a=[1, 1, 2],
... b=[2, 3, 3], label='d2')
>>> d1.join_on_key(d2, 'x', ('a', 'b'))
In this case, if we select all items in ``d2`` where a is 2, this
will select the third item:
>>> s = d2.new_subset()
>>> s.subset_state = d2.id['a'] == 2
>>> s.to_mask()
array([False, False, True], dtype=bool)
Since we have joined the datasets using both a and b, we select
all items in ``d1`` where x is either the value of a or b
(2 or 3), which means we select the second and third items:
>>> s = d1.new_subset()
>>> s.subset_state = d2.id['a'] == 2
>>> s.to_mask()
array([False, True, True], dtype=bool)
We can also join the datasets the other way around:
>>> d1 = Data(x=[1, 2, 3], label='d1')
>>> d2 = Data(a=[1, 1, 2],
... b=[2, 3, 3], label='d2')
>>> d2.join_on_key(d1, ('a', 'b'), 'x')
In this case, selecting items in ``d1`` where x is 1 selects the
first item, as expected:
>>> s = d1.new_subset()
>>> s.subset_state = d1.id['x'] == 1
>>> s.to_mask()
array([ True, False, False], dtype=bool)
This then causes any item in ``d2`` where either a or b are 1
to be selected, i.e. the first two items:
>>> s = d2.new_subset()
>>> s.subset_state = d1.id['x'] == 1
>>> s.to_mask()
array([ True, True, False], dtype=bool)
"""
# To make things easier, we transform all component inputs to a tuple
if isinstance(cid, six.string_types) or isinstance(cid, ComponentID):
cid = (cid,)
if isinstance(cid_other, six.string_types) or isinstance(cid_other, ComponentID):
cid_other = (cid_other,)
if len(cid) > 1 and len(cid_other) > 1 and len(cid) != len(cid_other):
raise Exception("Either the number of components in the key join "
"sets should match, or one of the component sets "
"should contain a single component.")
def get_component_id(data, name):
if isinstance(name, ComponentID):
return name
else:
cid = data.find_component_id(name)
if cid is None:
raise ValueError("ComponentID not found in %s: %s" %
(data.label, name))
return cid
cid = tuple(get_component_id(self, name) for name in cid)
cid_other = tuple(get_component_id(other, name) for name in cid_other)
self._key_joins[other] = (cid, cid_other)
other._key_joins[self] = (cid_other, cid)
@contract(component='component_like', label='cid_like')
def add_component(self, component, label, hidden=False):
""" Add a new component to this data set.
:param component: object to add. Can be a Component,
array-like object, or ComponentLink
:param label:
The label. If this is a string,
a new :class:`glue.core.component_id.ComponentID` with this label will be
created and associated with the Component
:type component: :class:`~glue.core.component.Component` or
array-like
:type label: :class:`str` or :class:`~glue.core.component_id.ComponentID`
:raises:
TypeError, if label is invalid
ValueError if the component has an incompatible shape
:returns:
The ComponentID associated with the newly-added component
"""
if isinstance(component, ComponentLink):
return self.add_component_link(component, label=label, hidden=hidden)
if not isinstance(component, Component):
component = Component.autotyped(component)
if isinstance(component, DerivedComponent):
if len(self._components) == 0:
raise TypeError("Cannot add a derived component as a first component")
component.set_parent(self)
if not(self._check_can_add(component)):
raise ValueError("The dimensions of component %s are "
"incompatible with the dimensions of this data: "
"%r vs %r" % (label, component.shape, self.shape))
if isinstance(label, ComponentID):
component_id = label
if component_id.parent is None:
component_id.parent = self
else:
component_id = ComponentID(label, hidden=hidden, parent=self)
if len(self._components) == 0:
self._create_pixel_and_world_components(ndim=component.ndim)
# In some cases, such as when loading a session, we actually disable the
# auto-creation of pixel and world coordinates, so the first component
# may be a coordinate component with no shape. Therefore we only set the
# shape once a component has a valid shape rather than strictly on the
# first component.
if self._shape == () and component.shape != ():
self._shape = component.shape
is_present = component_id in self._components
self._components[component_id] = component
if self.hub and not is_present:
msg = DataAddComponentMessage(self, component_id)
self.hub.broadcast(msg)
msg = ComponentsChangedMessage(self)
self.hub.broadcast(msg)
return component_id
@contract(link=ComponentLink,
label='cid_like|None',
returns=DerivedComponent)
def add_component_link(self, link, label=None, hidden=False):
""" Shortcut method for generating a new :class:`~glue.core.component.DerivedComponent`
from a ComponentLink object, and adding it to a data set.
:param link: :class:`~glue.core.component_link.ComponentLink`
:param label: The ComponentID or label to attach to.
:type label: :class:`~glue.core.component_id.ComponentID` or str
:returns:
The :class:`~glue.core.component.DerivedComponent` that was added
"""
if label is not None:
if not isinstance(label, ComponentID):
label = ComponentID(label, parent=self, hidden=hidden)
link.set_to_id(label)
if link.get_to_id() is None:
raise TypeError("Cannot add component_link: "
"has no 'to' ComponentID")
dc = DerivedComponent(self, link)
to_ = link.get_to_id()
self.add_component(dc, label=to_, hidden=hidden)
return dc
def _create_pixel_and_world_components(self, ndim):
for i in range(ndim):
comp = CoordinateComponent(self, i)
label = pixel_label(i, ndim)
cid = PixelComponentID(i, "Pixel Axis %s" % label, hidden=True, parent=self)
self.add_component(comp, cid)
self._pixel_component_ids.append(cid)
if self.coords:
for i in range(ndim):
comp = CoordinateComponent(self, i, world=True)
label = self.coords.axis_label(i)
cid = self.add_component(comp, label, hidden=True)
self._world_component_ids.append(cid)
@property
def components(self):
"""All :class:`ComponentIDs <glue.core.component_id.ComponentID>` in the Data
:rtype: list
"""
return list(self._components.keys())
@property
def visible_components(self):
""" :class:`ComponentIDs <glue.core.component_id.ComponentID>` for all non-hidden components.
:rtype: list
"""
return [cid for cid, comp in self._components.items()
if not cid.hidden and not comp.hidden and cid.parent is self]
@property
def coordinate_components(self):
"""The ComponentIDs associated with a :class:`~glue.core.component.CoordinateComponent`
:rtype: list
"""
return [c for c in self.component_ids() if
isinstance(self._components[c], CoordinateComponent)]
@property
def primary_components(self):
"""The ComponentIDs not associated with a :class:`~glue.core.component.DerivedComponent`
:rtype: list
"""
return [c for c in self.component_ids() if
not isinstance(self._components[c], DerivedComponent)]
@property
def derived_components(self):
"""The ComponentIDs for each :class:`~glue.core.component.DerivedComponent`
:rtype: list
"""
return [c for c in self.component_ids() if
isinstance(self._components[c], DerivedComponent)]
@property
def pixel_component_ids(self):
"""
The :class:`ComponentIDs <glue.core.component_id.ComponentID>` for each pixel coordinate.
"""
return self._pixel_component_ids
@property
def world_component_ids(self):
"""
The :class:`ComponentIDs <glue.core.component_id.ComponentID>` for each world coordinate.
"""
return self._world_component_ids
@contract(label='cid_like', returns='inst($ComponentID)|None')
def find_component_id(self, label):
""" Retrieve component_ids associated by label name.
:param label: ComponentID or string to search for
:returns:
The associated ComponentID if label is found and unique, else None.
First, this checks whether the component ID is present and unique in
the primary (non-derived) components of the data, and if not then
the derived components are checked. If there is one instance of the
label in the primary and one in the derived components, the primary
one takes precedence.
"""
for cid_set in (self.primary_components, self.derived_components):
result = []
for cid in cid_set:
if isinstance(label, ComponentID):
if cid is label:
result.append(cid)
else:
if cid.label == label:
result.append(cid)
if len(result) == 1:
return result[0]
elif len(result) > 1:
return None
return None
@property
def coordinate_links(self):
"""A list of the ComponentLinks that connect pixel and
world. If no coordinate transformation object is present,
return an empty list.
"""
if self._coordinate_links:
return self._coordinate_links
if not self.coords:
return []
if self.ndim != len(self._pixel_component_ids) or \
self.ndim != len(self._world_component_ids):
# haven't populated pixel, world coordinates yet
return []
def make_toworld_func(i):
def pix2world(*args):
return self.coords.pixel2world_single_axis(*args[::-1], axis=self.ndim - 1 - i)
return pix2world
def make_topixel_func(i):
def world2pix(*args):
return self.coords.world2pixel_single_axis(*args[::-1], axis=self.ndim - 1 - i)
return world2pix
result = []
for i in range(self.ndim):
link = CoordinateComponentLink(self._pixel_component_ids,
self._world_component_ids[i],
self.coords, i)
result.append(link)
link = CoordinateComponentLink(self._world_component_ids,
self._pixel_component_ids[i],
self.coords, i, pixel2world=False)
result.append(link)
self._coordinate_links = result
return result
@contract(axis=int, returns=ComponentID)
def get_pixel_component_id(self, axis):
"""Return the pixel :class:`glue.core.component_id.ComponentID` associated with a given axis
"""
return self._pixel_component_ids[axis]
@contract(axis=int, returns=ComponentID)
def get_world_component_id(self, axis):
"""Return the world :class:`glue.core.component_id.ComponentID` associated with a given axis
"""
return self._world_component_ids[axis]
@contract(returns='list(inst($ComponentID))')
def component_ids(self):
"""
Equivalent to :attr:`Data.components`
"""
return ComponentIDList(self._components.keys())
@contract(subset='isinstance(Subset)|None',
color='color|None',
label='string|None',
returns=Subset)
def new_subset(self, subset=None, color=None, label=None, **kwargs):
"""
Create a new subset, and attach to self.
.. note:: The preferred way for creating subsets is via
:meth:`~glue.core.data_collection.DataCollection.new_subset_group`.
Manually-instantiated subsets will **not** be
represented properly by the UI
:param subset: optional, reference subset or subset state.
If provided, the new subset will copy the logic of
this subset.
:returns: The new subset object
"""
nsub = len(self.subsets)
color = color or settings.SUBSET_COLORS[nsub % len(settings.SUBSET_COLORS)]
label = label or "%s.%i" % (self.label, nsub + 1)
new_subset = Subset(self, color=color, label=label, **kwargs)
if subset is not None:
new_subset.subset_state = subset.subset_state.copy()
self.add_subset(new_subset)
return new_subset
@contract(subset='inst($Subset, $SubsetState)')
def add_subset(self, subset):
"""Assign a pre-existing subset to this data object.
:param subset: A :class:`~glue.core.subset.Subset` or
:class:`~glue.core.subset.SubsetState` object
If input is a :class:`~glue.core.subset.SubsetState`,
it will be wrapped in a new Subset automatically
.. note:: The preferred way for creating subsets is via
:meth:`~glue.core.data_collection.DataCollection.new_subset_group`.
Manually-instantiated subsets will **not** be
represented properly by the UI
"""
if subset in self.subsets:
return # prevents infinite recursion
if isinstance(subset, SubsetState):
# auto-wrap state in subset
state = subset
subset = Subset(None)
subset.subset_state = state
self._subsets.append(subset)
if subset.data is not self:
subset.do_broadcast(False)
subset.data = self
subset.label = subset.label # hacky. disambiguates name if needed
if self.hub is not None:
msg = SubsetCreateMessage(subset)
self.hub.broadcast(msg)
subset.do_broadcast(True)
@contract(hub=Hub)
def register_to_hub(self, hub):
""" Connect to a hub.
This method usually doesn't have to be called directly, as
DataCollections manage the registration of data objects
"""
if not isinstance(hub, Hub):
raise TypeError("input is not a Hub object: %s" % type(hub))
self.hub = hub
@contract(attribute='string')
def broadcast(self, attribute):
"""
Send a :class:`~glue.core.message.DataUpdateMessage` to the hub
:param attribute: Name of an attribute that has changed (or None)
:type attribute: string
"""
if not self.hub:
return
msg = DataUpdateMessage(self, attribute=attribute)
self.hub.broadcast(msg)
@contract(old=ComponentID, new=ComponentID)
def update_id(self, old, new):
"""
Reassign a component to a different :class:`glue.core.component_id.ComponentID`
Parameters
----------
old : :class:`glue.core.component_id.ComponentID`
The old component ID
new : :class:`glue.core.component_id.ComponentID`
The new component ID
"""
if new is old:
return
if new.parent is None:
new.parent = self
changed = False
if old in self._components:
# We want to keep the original order, so we can't just do:
# self._components[new] = self._components[old]
# which will put the new component ID at the end, but instead
# we need to do:
self._components = OrderedDict((new, value) if key is old else (key, value)
for key, value in self._components.items())
changed = True
try:
index = self._pixel_component_ids.index(old)
self._pixel_component_ids[index] = new
changed = True
except ValueError:
pass
try:
index = self._world_component_ids.index(old)
self._world_component_ids[index] = new
changed = True
except ValueError:
pass
if changed and self.hub is not None:
# promote hidden status
new._hidden = new.hidden and old.hidden
# remove old component and broadcast the change
# see #508 for discussion of this
msg = ComponentReplacedMessage(self, old, new)
self.hub.broadcast(msg)
def __str__(self):
s = "Data Set: %s\n" % self.label
s += "Number of dimensions: %i\n" % self.ndim
s += "Shape: %s\n" % ' x '.join([str(x) for x in self.shape])
for hidden in [False, True]:
if hidden:
s += "Hidden "
else:
s += "Main "
s += "components:\n"
if hidden:
components = [c for c in self.components if c not in self.visible_components]
else:
components = [c for c in self.visible_components]
for i, cid in enumerate(components):
if cid.hidden != hidden:
continue
comp = self.get_component(cid)
if comp.units is None or comp.units == '':
s += " %i) %s\n" % (i, cid)
else:
s += " %i) %s [%s]\n" % (i, cid, comp.units)
return s[:-1]
def __repr__(self):
return 'Data (label: %s)' % self.label
def __setattr__(self, name, value):
if name == "hub" and hasattr(self, 'hub') \
and self.hub is not value and self.hub is not None:
raise AttributeError("Data has already been assigned "
"to a different hub")
object.__setattr__(self, name, value)
def __getitem__(self, key):
""" Shortcut syntax to access the numerical data in a component.
Equivalent to:
``component = data.get_component(component_id).data``
:param key:
The component to fetch data from
:type key: :class:`~glue.core.component_id.ComponentID`
:returns: :class:`~numpy.ndarray`
"""
key, view = split_component_view(key)
if isinstance(key, six.string_types):
_k = key
key = self.find_component_id(key)
if key is None:
raise IncompatibleAttribute(_k)
if isinstance(key, ComponentLink):
return key.compute(self, view)
try:
comp = self._components[key]
except KeyError:
raise IncompatibleAttribute(key)
shp = view_shape(self.shape, view)
if view is not None:
result = comp[view]
else:
if comp.categorical:
result = comp.codes
else:
result = comp.data
assert result.shape == shp, \
"Component view returned bad shape: %s %s" % (result.shape, shp)
return result
def __setitem__(self, key, value):
"""
Wrapper for data.add_component()
"""
self.add_component(value, key)
@contract(component_id='cid_like|None', returns=Component)
def get_component(self, component_id):
"""Fetch the component corresponding to component_id.
:param component_id: the component_id to retrieve
"""
if component_id is None:
raise IncompatibleAttribute()
if isinstance(component_id, six.string_types):
component_id = self.id[component_id]
try:
return self._components[component_id]
except KeyError:
raise IncompatibleAttribute(component_id)
def to_dataframe(self, index=None):
""" Convert the Data object into a pandas.DataFrame object
:param index: Any 'index-like' object that can be passed to the pandas.Series constructor
:return: pandas.DataFrame
"""
h = lambda comp: self.get_component(comp).to_series(index=index)
df = pd.DataFrame(dict((comp.label, h(comp)) for comp in self.components))
order = [comp.label for comp in self.components]
return df[order]
def reorder_components(self, component_ids):
"""
Reorder the components using a list of component IDs. The new set
of component IDs has to match the existing set (though order may differ).
"""
# We need to be careful because component IDs overload == so we can't
# use the normal ways to test whether the component IDs are the same
# as self.components - instead we need to explicitly use id
if len(component_ids) != len(self.components):
raise ValueError("Number of component in component_ids does not "
"match existing number of components")
if set(id(c) for c in self.components) != set(id(c) for c in component_ids):
raise ValueError("specified component_ids should match existing components")
existing = self.components
for idx in range(len(component_ids)):
if component_ids[idx] is not existing[idx]:
break
else:
# If we get here then the suggested order is the same as the existing one
return
# PY3: once we drop support for Python 2 we could sort in-place using
# the move_to_end method on OrderedDict
self._components = OrderedDict((key, self._components[key]) for key in component_ids)
if self.hub:
msg = DataReorderComponentMessage(self, list(self._components))
self.hub.broadcast(msg)
@contract(mapping="dict(inst($Component, $ComponentID):array_like)")
def update_components(self, mapping):
"""
Change the numerical data associated with some of the Components
in this Data object.
All changes to component numerical data should use this method,
which broadcasts the state change to the appropriate places.
:param mapping: A dict mapping Components or ComponentIDs to arrays.
This method has the following restrictions:
- New components must have the same shape as old components
- Component subclasses cannot be updated.
"""
for comp, data in mapping.items():
if isinstance(comp, ComponentID):
comp = self.get_component(comp)
data = np.asarray(data)
if data.shape != self.shape:
raise ValueError("Cannot change shape of data")
comp._data = data
# alert hub of the change
if self.hub is not None:
msg = NumericalDataChangedMessage(self)
self.hub.broadcast(msg)
for subset in self.subsets:
clear_cache(subset.subset_state.to_mask)
def update_values_from_data(self, data):
"""
Replace numerical values in data to match values from another dataset.
Notes
-----
This method drops components that aren't present in the new data, and
adds components that are in the new data that were not in the original
data. The matching is done by component label, and components are
resized if needed. This means that for components with matching labels
in the original and new data, the
:class:`~glue.core.component_id.ComponentID` are preserved, and
existing plots and selections will be updated to reflect the new
values. Note that the coordinates are also copied, but the style is
**not** copied.
"""
old_labels = [cid.label for cid in self.components]
new_labels = [cid.label for cid in data.components]
if len(old_labels) == len(set(old_labels)):
old_labels = set(old_labels)
else:
raise ValueError("Non-unique component labels in original data")
if len(new_labels) == len(set(new_labels)):
new_labels = set(new_labels)
else:
raise ValueError("Non-unique component labels in new data")
# Remove components that don't have a match in new data
for cname in old_labels - new_labels:
cid = self.find_component_id(cname)
self.remove_component(cid)
# Update shape
self._shape = data._shape
# Update components that exist in both. Note that we can't just loop
# over old_labels & new_labels since we need to make sure we preserve
# the order of the components, and sets don't preserve order.
for cid in self.components:
cname = cid.label
if cname in old_labels & new_labels:
comp_old = self.get_component(cname)
comp_new = data.get_component(cname)
comp_old._data = comp_new._data
# Add components that didn't exist in original one. As above, we try
# and preserve the order of components as much as possible.
for cid in data.components:
cname = cid.label
if cname in new_labels - old_labels:
cid = data.find_component_id(cname)
comp_new = data.get_component(cname)
self.add_component(comp_new, cid.label)
# Update data label
self.label = data.label
# Update data coordinates
self.coords = data.coords
# alert hub of the change
if self.hub is not None:
msg = NumericalDataChangedMessage(self)
self.hub.broadcast(msg)
for subset in self.subsets:
clear_cache(subset.subset_state.to_mask)
@contract(i=int, ndim=int)
def pixel_label(i, ndim):
label = "{0}".format(i)
if 1 <= ndim <= 3:
label += " [{0}]".format('xyz'[ndim - 1 - i])
return label
| {
"repo_name": "stscieisenhamer/glue",
"path": "glue/core/data.py",
"copies": "1",
"size": "37005",
"license": "bsd-3-clause",
"hash": -5083344422020610000,
"line_mean": 35.1730205279,
"line_max": 101,
"alpha_frac": 0.5798135387,
"autogenerated": false,
"ratio": 4.278529309746792,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5358342848446791,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from collections import OrderedDict
class Record(object):
def __init__(self):
self.rec_data = OrderedDict()
self.rec_im_data = OrderedDict()
self.rec_imgs = OrderedDict()
self.rec_kernels = []
###########################################################################
# Functions for recording data
@property
def data_keys(self):
"""Get dictionary keys of `rec_data`"""
return list(self.rec_data)
@property
def im_data_keys(self):
"""Get dictionary keys of `rec_im_data`"""
return list(self.rec_im_data)
@property
def imgs_keys(self):
"""Get dictionary keys of `rec_imgs`"""
return list(self.rec_imgs)
@property
def num_data(self):
"""Get number of `rec_data`"""
return len(self.rec_data)
@property
def num_im_data(self):
"""Get number of `rec_im_data`"""
return len(self.rec_im_data)
@property
def num_imgs(self):
"""Get number of `rec_imgs`"""
return len(self.rec_imgs)
def empty_records(self):
self.rec_data.clear()
self.rec_im_data.clear()
self.rec_imgs.clear()
self.rec_kernels = []
def add_data(self, name, data, **kwargs):
"""Add scalar data of one minibatcth to monitor.
"""
kwargs['data'] = data
self.rec_data[name] = kwargs
def add_im_data(self, name, data, **kwargs):
"""Add scalar data for each image (imagewise) or patch (patchwise)
to record.
"""
kwargs['data'] = data
self.rec_im_data[name] = kwargs
def add_imgs(self, name, data, **kwargs):
"""Add image data for each image (imagewise) or patch (patchwise)
to record.
Supplementary information can be added via `**kwargs`.
"""
kwargs['data'] = data
self.rec_imgs[name] = kwargs
def get_function_outputs(self, train=False):
if train:
return (self.get_data())
else:
return (self.get_data() + self.get_im_data() + self.get_imgs())
def get_data(self):
return [elem['data'] for elem in list(self.rec_data.values())]
def get_im_data(self):
return [elem['data'] for elem in list(self.rec_im_data.values())]
def get_imgs(self):
return [elem['data'] for elem in list(self.rec_imgs.values())]
def get_until_indices(self, start=1):
"""Returns the 'until-indices' for each reording data type.
"""
until_loss = len(self.rec_data) + start
until_im_info = until_loss + len(self.rec_im_data)
until_img = until_im_info + len(self.rec_imgs)
return until_loss, until_im_info, until_img
def add_kernel(self, layers, nth_layers):
"""Add a kernel image from the `nth_layers` of self.layers[`key`]
to record.
"""
if isinstance(nth_layers, (list, tuple)):
for nth in nth_layers:
layer = layers[nth]
assert layer.__class__.__name__ == 'ConvLayer'
self.rec_kernels.append(layer.W)
else:
layer = layers[nth_layers]
assert layer.__class__.__name__ == 'ConvLayer'
self.rec_kernels.append(layer.W)
# def get_rec_info(self):
# rec_info = {}
# rec_info['rec_data'] = self.exclude_info(self.rec_data, 'data')
# rec_info['rec_im_data'] = self.exclude_info(self.rec_im_data, 'data')
# rec_info['rec_imgs'] = self.exclude_info(self.rec_imgs, 'data')
# return rec_info
# def exclude_info(self, dic, exclude):
# new_dic = OrderedDict()
# for dic_key in dic:
# new_elems = {}
# for elem_key in dic[dic_key]:
# if elem_key == exclude:
# continue
# else:
# new_elems[elem_key] = dic[dic_key][elem_key]
# new_dic[dic_key] = new_elems
# return new_dic
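# --- illustration (not part of the original module) ---
# A minimal sketch of how Record collects outputs and how
# get_until_indices slices them; the recorded names and objects below are
# plain placeholders standing in for real tensors/arrays.
if __name__ == "__main__":
    _rec = Record()
    _rec.add_data('loss', 0.0, weight=1.0)
    _rec.add_im_data('mos_pred', 0.0)
    _rec.add_imgs('sens_map', None)
    assert _rec.data_keys == ['loss']
    # until_loss = 1 + 1, until_im_info = 2 + 1, until_img = 3 + 1
    assert _rec.get_until_indices(start=1) == (2, 3, 4)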
| {
"repo_name": "jongyookim/IQA_BIECON_release",
"path": "IQA_BIECON_release/models/model_record.py",
"copies": "1",
"size": "4072",
"license": "mit",
"hash": -5365234332030200000,
"line_mean": 30.8125,
"line_max": 79,
"alpha_frac": 0.5424852652,
"autogenerated": false,
"ratio": 3.578207381370826,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4620692646570826,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from collections import OrderedDict
from pymel.core import aimConstraint, duplicate, hide, orientConstraint, parent
from ....add import simpleName
from .... import core
from .... import nodeApi
from ..cardRigging import MetaControl, ParamInfo
from .. import controllerShape
from ..core import config
#from .. import log
#from .. import space
from . import _util as util
from .. import node
@util.adds('AutoTwistPower')
@util.defaultspec( {'shape': 'disc', 'color': 'blue 0.22', 'size': 5, 'align': 'x'} )
def buildTwist(twist, twistDriver, twistLateralAxis=[0, 1, 0], driverLateralAxis=[0, 1, 0], defaultPower=0.5, controlSpec={}):
'''
Twist bone's aim axis = the lateral axis
Twist Up axis = points to the target (wrist)
World up = object rotation
up obj = target (wrist)
up axis = I think this is the target's lateral axis
.. todo::
I'm not sure, but it looks like a "_L" is sneaking into the name somewhere
'''
container = util.parentGroup(twist)
container.setParent( node.mainGroup() )
container.rename( util.trimName(twist) + '_twist' )
anchor = duplicate( twist, po=True )[0]
aimer = duplicate( twist, po=True )[0]
space = duplicate( twist, po=True )[0]
anchor.rename( simpleName(twist, '{0}Anchor') )
aimer.rename( simpleName(twist, '{0}Aimer') )
space.rename( simpleName(twist, '{0}Space') )
space.drawStyle.set(2)
hide(anchor, aimer)
parent( anchor, aimer, space, container )
constraint = orientConstraint( anchor, aimer, space )
constraint.interpType.set(2) # Set to "shortest" because it will flip otherwise.
aimConstraint( twistDriver, aimer, wut='objectrotation', wuo=twistDriver, mo=True,
u=util.identifyAxis(twist, asVector=True), # noqa e127
aimVector=twistLateralAxis,
wu=driverLateralAxis,
)
ctrl = controllerShape.build( util.trimName(twistDriver) + "Twist", controlSpec['main'], controllerShape.ControlType.ROTATE)
ctrl.setParent(space)
ctrl.t.set( 0, 0, 0 )
ctrl.r.set( 0, 0, 0 )
core.dagObj.lockScale( ctrl )
core.dagObj.lockTrans( ctrl )
core.dagObj.lockRot( ctrl )
# Unlock the twist axis
ctrl.attr( 'r' + util.identifyAxis(twist) ).unlock()
ctrl.attr( 'r' + util.identifyAxis(twist) ).setKeyable(True)
# Drive the space's constraint
anchorAttr, autoAttr = orientConstraint( constraint, q=1, wal=1 )
util.drive( ctrl, 'AutoTwistPower', autoAttr, minVal=0, maxVal=1, dv=defaultPower )
core.math.opposite( ctrl.AutoTwistPower ) >> anchorAttr
ctrl.AutoTwistPower.set( defaultPower )
orientConstraint( ctrl, twist )
ctrl = nodeApi.RigController.convert(ctrl)
ctrl.container = container
return ctrl, container
class TwistHelper(MetaControl):
''' Special controller to automate distributed twisting, like on the forearm. '''
#displayInUI = False
fk_ = 'pdil.tool.fossil.rigging.twistHelper.buildTwist'
fkInput = OrderedDict( [
('defaultPower', ParamInfo( 'Default Power', 'Default automatic twist power', ParamInfo.FLOAT, 0.5)),
] )
@classmethod
def build(cls, card, buildFk=True):
'''
.. todo::
Make this actually respect control overrides.
'''
#twist(twist, twistDriver, twistLateralAxis=[0,0,1], driverLateralAxis=[0,0,1], controlSpec={}):
kwargs = cls.readFkKwargs(card, False)
side = card.findSuffix()
#if not util.canMirror( card.start() ) or card.isAsymmetric():
if not side or card.isAsymmetric():
ctrl, container = cls.fk(card.joints[0].real, card.extraNode[0].real, **kwargs)
card.outputCenter.fk = ctrl
else:
# Build one side...
#side = config.letterToWord[sideCode]
ctrl, container = cls.fk(card.joints[0].real, card.extraNode[0].real, **kwargs)
card.getSide(side).fk = ctrl
# ... then flip the side info and build the other
#side = config.otherLetter(side)
side = config.otherSideCode(side)
ctrl, container = cls.fk(card.joints[0].realMirror, card.extraNode[0].realMirror, **kwargs)
card.getSide(side).fk = ctrl
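# --- illustration (not part of the original module) ---
# Rough flow of TwistHelper.build, as read from the code above: when the card
# has no side suffix or is asymmetric, a single twist control is built and
# stored on outputCenter; otherwise one control is built per side, with the
# mirrored joints (realMirror) used for the second side.  buildTwist itself
# parents an anchor/aimer/space trio under a container, aim-constrains the
# aimer at the driver, and exposes the orient blend through the control's
# AutoTwistPower attribute.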
| {
"repo_name": "patcorwin/fossil",
"path": "pdil/tool/fossil/rigging/twistHelper.py",
"copies": "1",
"size": "4421",
"license": "bsd-3-clause",
"hash": 4798897393903188000,
"line_mean": 33.811023622,
"line_max": 128,
"alpha_frac": 0.6376385433,
"autogenerated": false,
"ratio": 3.511517077045274,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46491556203452744,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from .common import Benchmark, get_squares_, get_indexes_, get_indexes_rand_
from os.path import join as pjoin
import shutil
import sys
import six
from numpy import memmap, float32, array
import numpy as np
from tempfile import mkdtemp
class Indexing(Benchmark):
params = [["indexes_", "indexes_rand_"],
['I', ':,I', 'np.ix_(I, I)'],
['', '=1']]
param_names = ['indexes', 'sel', 'op']
def setup(self, indexes, sel, op):
sel = sel.replace('I', indexes)
ns = {'squares_': get_squares_(),
'np': np,
'indexes_': get_indexes_(),
'indexes_rand_': get_indexes_rand_()}
if sys.version_info[0] >= 3:
code = "def run():\n for a in squares_.values(): a[%s]%s"
else:
code = "def run():\n for a in squares_.itervalues(): a[%s]%s"
code = code % (sel, op)
six.exec_(code, ns)
self.func = ns['run']
def time_op(self, indexes, sel, op):
self.func()
class IndexingSeparate(Benchmark):
def setup(self):
self.tmp_dir = mkdtemp()
self.fp = memmap(pjoin(self.tmp_dir, 'tmp.dat'),
dtype=float32, mode='w+', shape=(50, 60))
self.indexes = array([3, 4, 6, 10, 20])
def teardown(self):
del self.fp
shutil.rmtree(self.tmp_dir)
def time_mmap_slicing(self):
for i in range(1000):
self.fp[5:10]
def time_mmap_fancy_indexing(self):
for i in range(1000):
self.fp[self.indexes]
class IndexingStructured0D(Benchmark):
def setup(self):
self.dt = np.dtype([('a', 'f4', 256)])
self.A = np.zeros((), self.dt)
self.B = self.A.copy()
self.a = np.zeros(1, self.dt)[0]
self.b = self.a.copy()
def time_array_slice(self):
self.B['a'][:] = self.A['a']
def time_array_all(self):
self.B['a'] = self.A['a']
def time_scalar_slice(self):
self.b['a'][:] = self.a['a']
def time_scalar_all(self):
self.b['a'] = self.a['a']
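# --- illustration (not part of the original benchmark file) ---
# asv (airspeed velocity) expands ``params`` as a cartesian product, so the
# Indexing class above is timed once per (indexes, sel, op) combination.
# For example, ('indexes_rand_', ':,I', '=1') makes setup() exec roughly:
#
#   def run():
#       for a in squares_.values(): a[:,indexes_rand_]=1
#
# and time_op() then just calls run().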
| {
"repo_name": "pizzathief/numpy",
"path": "benchmarks/benchmarks/bench_indexing.py",
"copies": "55",
"size": "2150",
"license": "bsd-3-clause",
"hash": -6325411904109330000,
"line_mean": 25.5432098765,
"line_max": 76,
"alpha_frac": 0.5325581395,
"autogenerated": false,
"ratio": 3.282442748091603,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from .common import Benchmark, get_squares_, get_indexes_, get_indexes_rand_
import sys
import six
from numpy import memmap, float32, array
import numpy as np
class Indexing(Benchmark):
params = [["indexes_", "indexes_rand_"],
['I', ':,I', 'np.ix_(I, I)'],
['', '=1']]
param_names = ['indexes', 'sel', 'op']
def setup(self, indexes, sel, op):
sel = sel.replace('I', indexes)
ns = {'squares_': get_squares_(),
'np': np,
'indexes_': get_indexes_(),
'indexes_rand_': get_indexes_rand_()}
if sys.version_info[0] >= 3:
code = "def run():\n for a in squares_.values(): a[%s]%s"
else:
code = "def run():\n for a in squares_.itervalues(): a[%s]%s"
code = code % (sel, op)
six.exec_(code, ns)
self.func = ns['run']
def time_op(self, indexes, sel, op):
self.func()
class IndexingSeparate(Benchmark):
def setup(self):
self.fp = memmap('tmp.dat', dtype=float32, mode='w+', shape=(50, 60))
self.indexes = array([3, 4, 6, 10, 20])
def time_mmap_slicing(self):
for i in range(1000):
self.fp[5:10]
def time_mmap_fancy_indexing(self):
for i in range(1000):
self.fp[self.indexes]
class IndexingStructured0D(Benchmark):
def setup(self):
self.dt = np.dtype([('a', 'f4', 256)])
self.A = np.zeros((), self.dt)
self.B = self.A.copy()
self.a = np.zeros(1, self.dt)[0]
self.b = self.a.copy()
def time_array_slice(self):
self.B['a'][:] = self.A['a']
def time_array_all(self):
self.B['a'] = self.A['a']
def time_scalar_slice(self):
self.b['a'][:] = self.a['a']
def time_scalar_all(self):
self.b['a'] = self.a['a']
| {
"repo_name": "I--P/numpy",
"path": "benchmarks/benchmarks/bench_indexing.py",
"copies": "6",
"size": "1913",
"license": "bsd-3-clause",
"hash": 3167220865013784000,
"line_mean": 25.5694444444,
"line_max": 77,
"alpha_frac": 0.5243073706,
"autogenerated": false,
"ratio": 3.215126050420168,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6739433421020168,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from .common import Benchmark, get_squares_, get_indexes_rand, TYPES1
import numpy as np
class Eindot(Benchmark):
def setup(self):
self.a = np.arange(60000.0).reshape(150, 400)
self.ac = self.a.copy()
self.at = self.a.T
self.atc = self.a.T.copy()
self.b = np.arange(240000.0).reshape(400, 600)
self.c = np.arange(600)
self.d = np.arange(400)
self.a3 = np.arange(480000.).reshape(60, 80, 100)
self.b3 = np.arange(192000.).reshape(80, 60, 40)
def time_dot_a_b(self):
np.dot(self.a, self.b)
def time_dot_d_dot_b_c(self):
np.dot(self.d, np.dot(self.b, self.c))
def time_dot_trans_a_at(self):
np.dot(self.a, self.at)
def time_dot_trans_a_atc(self):
np.dot(self.a, self.atc)
def time_dot_trans_at_a(self):
np.dot(self.at, self.a)
def time_dot_trans_atc_a(self):
np.dot(self.atc, self.a)
def time_einsum_i_ij_j(self):
np.einsum('i,ij,j', self.d, self.b, self.c)
def time_einsum_ij_jk_a_b(self):
np.einsum('ij,jk', self.a, self.b)
def time_einsum_ijk_jil_kl(self):
np.einsum('ijk,jil->kl', self.a3, self.b3)
def time_inner_trans_a_a(self):
np.inner(self.a, self.a)
def time_inner_trans_a_ac(self):
np.inner(self.a, self.ac)
def time_matmul_a_b(self):
np.matmul(self.a, self.b)
def time_matmul_d_matmul_b_c(self):
np.matmul(self.d, np.matmul(self.b, self.c))
def time_matmul_trans_a_at(self):
np.matmul(self.a, self.at)
def time_matmul_trans_a_atc(self):
np.matmul(self.a, self.atc)
def time_matmul_trans_at_a(self):
np.matmul(self.at, self.a)
def time_matmul_trans_atc_a(self):
np.matmul(self.atc, self.a)
def time_tensordot_a_b_axes_1_0_0_1(self):
np.tensordot(self.a3, self.b3, axes=([1, 0], [0, 1]))
class Linalg(Benchmark):
params = [['svd', 'pinv', 'det', 'norm'],
TYPES1]
param_names = ['op', 'type']
def setup(self, op, typename):
np.seterr(all='ignore')
self.func = getattr(np.linalg, op)
if op == 'cholesky':
# we need a positive definite
self.a = np.dot(get_squares_()[typename],
get_squares_()[typename].T)
else:
self.a = get_squares_()[typename]
# check that dtype is supported at all
try:
self.func(self.a[:2, :2])
except TypeError:
raise NotImplementedError()
def time_op(self, op, typename):
self.func(self.a)
class Lstsq(Benchmark):
def setup(self):
self.a = get_squares_()['float64']
self.b = get_indexes_rand()[:100].astype(np.float64)
def time_numpy_linalg_lstsq_a__b_float64(self):
np.linalg.lstsq(self.a, self.b, rcond=-1)
| {
"repo_name": "MSeifert04/numpy",
"path": "benchmarks/benchmarks/bench_linalg.py",
"copies": "8",
"size": "2940",
"license": "bsd-3-clause",
"hash": -2764071286904178700,
"line_mean": 25.9724770642,
"line_max": 69,
"alpha_frac": 0.5676870748,
"autogenerated": false,
"ratio": 2.859922178988327,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 109
} |
from __future__ import absolute_import, division, print_function
from .common import Benchmark, get_squares
import numpy as np
from io import StringIO
class Copy(Benchmark):
params = ["int8", "int16", "float32", "float64",
"complex64", "complex128"]
param_names = ['type']
def setup(self, typename):
dtype = np.dtype(typename)
self.d = np.arange((50 * 500), dtype=dtype).reshape((500, 50))
self.e = np.arange((50 * 500), dtype=dtype).reshape((50, 500))
self.e_d = self.e.reshape(self.d.shape)
self.dflat = np.arange((50 * 500), dtype=dtype)
def time_memcpy(self, typename):
self.d[...] = self.e_d
def time_memcpy_large_out_of_place(self, typename):
l = np.ones(1024**2, dtype=np.dtype(typename))
l.copy()
def time_cont_assign(self, typename):
self.d[...] = 1
def time_strided_copy(self, typename):
self.d[...] = self.e.T
def time_strided_assign(self, typename):
self.dflat[::2] = 2
class CopyTo(Benchmark):
def setup(self):
self.d = np.ones(50000)
self.e = self.d.copy()
self.m = (self.d == 1)
self.im = (~ self.m)
self.m8 = self.m.copy()
self.m8[::8] = (~ self.m[::8])
self.im8 = (~ self.m8)
def time_copyto(self):
np.copyto(self.d, self.e)
def time_copyto_sparse(self):
np.copyto(self.d, self.e, where=self.m)
def time_copyto_dense(self):
np.copyto(self.d, self.e, where=self.im)
def time_copyto_8_sparse(self):
np.copyto(self.d, self.e, where=self.m8)
def time_copyto_8_dense(self):
np.copyto(self.d, self.e, where=self.im8)
class Savez(Benchmark):
def setup(self):
self.squares = get_squares()
def time_vb_savez_squares(self):
np.savez('tmp.npz', self.squares)
class LoadtxtCSVComments(Benchmark):
# benchmarks for np.loadtxt comment handling
# when reading in CSV files
params = [10, int(1e2), int(1e4), int(1e5)]
param_names = ['num_lines']
def setup(self, num_lines):
data = [u'1,2,3 # comment'] * num_lines
# unfortunately, timeit will only run setup()
# between repeat events, but not for iterations
# within repeats, so the StringIO object
# will have to be rewound in the benchmark proper
self.data_comments = StringIO(u'\n'.join(data))
def time_comment_loadtxt_csv(self, num_lines):
# benchmark handling of lines with comments
# when loading in from csv files
# inspired by similar benchmark in pandas
# for read_csv
# need to rewind StringIO object (unfortunately
# confounding timing result somewhat) for every
# call to timing test proper
np.loadtxt(self.data_comments,
delimiter=u',')
self.data_comments.seek(0)
class LoadtxtCSVdtypes(Benchmark):
# benchmarks for np.loadtxt operating with
# different dtypes parsed / cast from CSV files
params = (['float32', 'float64', 'int32', 'int64',
'complex128', 'str', 'object'],
[10, int(1e2), int(1e4), int(1e5)])
param_names = ['dtype', 'num_lines']
def setup(self, dtype, num_lines):
data = [u'5, 7, 888'] * num_lines
self.csv_data = StringIO(u'\n'.join(data))
def time_loadtxt_dtypes_csv(self, dtype, num_lines):
# benchmark loading arrays of various dtypes
# from csv files
# state-dependent timing benchmark requires
# rewind of StringIO object
np.loadtxt(self.csv_data,
delimiter=u',',
dtype=dtype)
self.csv_data.seek(0)
class LoadtxtCSVStructured(Benchmark):
# benchmarks for np.loadtxt operating with
# a structured data type & CSV file
def setup(self):
num_lines = 50000
data = [u"M, 21, 72, X, 155"] * num_lines
self.csv_data = StringIO(u'\n'.join(data))
def time_loadtxt_csv_struct_dtype(self):
# obligate rewind of StringIO object
# between iterations of a repeat:
np.loadtxt(self.csv_data,
delimiter=u',',
dtype=[('category_1', 'S1'),
('category_2', 'i4'),
('category_3', 'f8'),
('category_4', 'S1'),
('category_5', 'f8')])
self.csv_data.seek(0)
class LoadtxtCSVSkipRows(Benchmark):
# benchmarks for loadtxt row skipping when
# reading in csv file data; a similar benchmark
# is present in the pandas asv suite
params = [0, 500, 10000]
param_names = ['skiprows']
def setup(self, skiprows):
np.random.seed(123)
test_array = np.random.rand(100000, 3)
self.fname = 'test_array.csv'
np.savetxt(fname=self.fname,
X=test_array,
delimiter=',')
def time_skiprows_csv(self, skiprows):
np.loadtxt(self.fname,
delimiter=',',
skiprows=skiprows)
class LoadtxtReadUint64Integers(Benchmark):
# pandas has a similar CSV reading benchmark
# modified to suit np.loadtxt
params = [550, 1000, 10000]
param_names = ['size']
def setup(self, size):
arr = np.arange(size).astype('uint64') + 2**63
self.data1 = StringIO(u'\n'.join(arr.astype(str).tolist()))
arr = arr.astype(object)
arr[500] = -1
self.data2 = StringIO(u'\n'.join(arr.astype(str).tolist()))
def time_read_uint64(self, size):
# mandatory rewind of StringIO object
# between iterations of a repeat:
np.loadtxt(self.data1)
self.data1.seek(0)
def time_read_uint64_neg_values(self, size):
# mandatory rewind of StringIO object
# between iterations of a repeat:
np.loadtxt(self.data2)
self.data2.seek(0)
class LoadtxtUseColsCSV(Benchmark):
# benchmark selective column reading from CSV files
# using np.loadtxt
params = [2, [1, 3], [1, 3, 5, 7]]
param_names = ['usecols']
def setup(self, usecols):
num_lines = 5000
data = [u'0, 1, 2, 3, 4, 5, 6, 7, 8, 9'] * num_lines
self.csv_data = StringIO(u'\n'.join(data))
def time_loadtxt_usecols_csv(self, usecols):
# must rewind StringIO because of state
# dependence of file reading
np.loadtxt(self.csv_data,
delimiter=u',',
usecols=usecols)
self.csv_data.seek(0)
class LoadtxtCSVDateTime(Benchmark):
# benchmarks for np.loadtxt operating with
# datetime data in a CSV file
params = [20, 200, 2000, 20000]
param_names = ['num_lines']
def setup(self, num_lines):
# create the equivalent of a two-column CSV file
# with date strings in the first column and random
# floating point data in the second column
dates = np.arange('today', 20, dtype=np.datetime64)
np.random.seed(123)
values = np.random.rand(20)
date_line = u''
for date, value in zip(dates, values):
date_line += (str(date) + ',' + str(value) + '\n')
# expand data to specified number of lines
data = date_line * (num_lines // 20)
self.csv_data = StringIO(data)
def time_loadtxt_csv_datetime(self, num_lines):
# rewind StringIO object -- the timing iterations
# are state-dependent
X = np.loadtxt(self.csv_data,
delimiter=u',',
dtype=([('dates', 'M8[us]'),
('values', 'float64')]))
self.csv_data.seek(0)
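# --- illustration (not part of the original benchmark file) ---
# A minimal, self-contained sketch of the pattern used above: np.loadtxt
# consumes the StringIO stream, so the stream must be rewound before it can
# be parsed again inside a timing loop.
if __name__ == "__main__":
    demo = StringIO(u"1,2,3 # comment\n4,5,6 # comment")
    first = np.loadtxt(demo, delimiter=u",")   # '#' comments stripped by default
    demo.seek(0)                               # rewind for the next parse
    second = np.loadtxt(demo, delimiter=u",")
    assert np.array_equal(first, second)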
| {
"repo_name": "gfyoung/numpy",
"path": "benchmarks/benchmarks/bench_io.py",
"copies": "4",
"size": "7707",
"license": "bsd-3-clause",
"hash": 7641159149774309000,
"line_mean": 30.4571428571,
"line_max": 70,
"alpha_frac": 0.5764889062,
"autogenerated": false,
"ratio": 3.563106796116505,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0007222586814423549,
"num_lines": 245
} |
from __future__ import absolute_import, division, print_function
from .common import Benchmark, get_squares
import numpy as np
class Copy(Benchmark):
params = ["int8", "int16", "float32", "float64",
"complex64", "complex128"]
param_names = ['type']
def setup(self, typename):
dtype = np.dtype(typename)
self.d = np.arange((50 * 500), dtype=dtype).reshape((500, 50))
self.e = np.arange((50 * 500), dtype=dtype).reshape((50, 500))
self.e_d = self.e.reshape(self.d.shape)
self.dflat = np.arange((50 * 500), dtype=dtype)
def time_memcpy(self, typename):
self.d[...] = self.e_d
def time_cont_assign(self, typename):
self.d[...] = 1
def time_strided_copy(self, typename):
self.d[...] = self.e.T
def time_strided_assign(self, typename):
self.dflat[::2] = 2
class CopyTo(Benchmark):
def setup(self):
self.d = np.ones(50000)
self.e = self.d.copy()
self.m = (self.d == 1)
self.im = (~ self.m)
self.m8 = self.m.copy()
self.m8[::8] = (~ self.m[::8])
self.im8 = (~ self.m8)
def time_copyto(self):
np.copyto(self.d, self.e)
def time_copyto_sparse(self):
np.copyto(self.d, self.e, where=self.m)
def time_copyto_dense(self):
np.copyto(self.d, self.e, where=self.im)
def time_copyto_8_sparse(self):
np.copyto(self.d, self.e, where=self.m8)
def time_copyto_8_dense(self):
np.copyto(self.d, self.e, where=self.im8)
class Savez(Benchmark):
def setup(self):
self.squares = get_squares()
def time_vb_savez_squares(self):
np.savez('tmp.npz', self.squares)
| {
"repo_name": "joferkington/numpy",
"path": "benchmarks/benchmarks/bench_io.py",
"copies": "50",
"size": "1710",
"license": "bsd-3-clause",
"hash": -7977641385077962000,
"line_mean": 25.71875,
"line_max": 70,
"alpha_frac": 0.5760233918,
"autogenerated": false,
"ratio": 3.0052724077328645,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 64
} |
from __future__ import absolute_import, division, print_function
from .common import Benchmark, get_squares_
import numpy as np
ufuncs = ['abs', 'absolute', 'add', 'arccos', 'arccosh', 'arcsin', 'arcsinh',
'arctan', 'arctan2', 'arctanh', 'bitwise_and', 'bitwise_not',
'bitwise_or', 'bitwise_xor', 'cbrt', 'ceil', 'conj', 'conjugate',
'copysign', 'cos', 'cosh', 'deg2rad', 'degrees', 'divide', 'divmod',
'equal', 'exp', 'exp2', 'expm1', 'fabs', 'float_power', 'floor',
'floor_divide', 'fmax', 'fmin', 'fmod', 'frexp', 'gcd', 'greater',
'greater_equal', 'heaviside', 'hypot', 'invert', 'isfinite',
'isinf', 'isnan', 'isnat', 'lcm', 'ldexp', 'left_shift', 'less',
'less_equal', 'log', 'log10', 'log1p', 'log2', 'logaddexp',
'logaddexp2', 'logical_and', 'logical_not', 'logical_or',
'logical_xor', 'matmul', 'maximum', 'minimum', 'mod', 'modf', 'multiply',
'negative', 'nextafter', 'not_equal', 'positive', 'power',
'rad2deg', 'radians', 'reciprocal', 'remainder', 'right_shift',
'rint', 'sign', 'signbit', 'sin', 'sinh', 'spacing', 'sqrt',
'square', 'subtract', 'tan', 'tanh', 'true_divide', 'trunc']
for name in dir(np):
if isinstance(getattr(np, name, None), np.ufunc) and name not in ufuncs:
print("Missing ufunc %r" % (name,))
class Broadcast(Benchmark):
def setup(self):
self.d = np.ones((50000, 100), dtype=np.float64)
self.e = np.ones((100,), dtype=np.float64)
def time_broadcast(self):
self.d - self.e
class UFunc(Benchmark):
params = [ufuncs]
param_names = ['ufunc']
timeout = 10
def setup(self, ufuncname):
np.seterr(all='ignore')
try:
self.f = getattr(np, ufuncname)
except AttributeError:
raise NotImplementedError()
self.args = []
for t, a in get_squares_().items():
arg = (a,) * self.f.nin
try:
self.f(*arg)
except TypeError:
continue
self.args.append(arg)
def time_ufunc_types(self, ufuncname):
[self.f(*arg) for arg in self.args]
class Custom(Benchmark):
def setup(self):
self.b = np.ones(20000, dtype=bool)
def time_nonzero(self):
np.nonzero(self.b)
def time_not_bool(self):
(~self.b)
def time_and_bool(self):
(self.b & self.b)
def time_or_bool(self):
(self.b | self.b)
class CustomInplace(Benchmark):
def setup(self):
self.c = np.ones(500000, dtype=np.int8)
self.i = np.ones(150000, dtype=np.int32)
self.f = np.zeros(150000, dtype=np.float32)
self.d = np.zeros(75000, dtype=np.float64)
# fault memory
self.f *= 1.
self.d *= 1.
def time_char_or(self):
np.bitwise_or(self.c, 0, out=self.c)
np.bitwise_or(0, self.c, out=self.c)
def time_char_or_temp(self):
0 | self.c | 0
def time_int_or(self):
np.bitwise_or(self.i, 0, out=self.i)
np.bitwise_or(0, self.i, out=self.i)
def time_int_or_temp(self):
0 | self.i | 0
def time_float_add(self):
np.add(self.f, 1., out=self.f)
np.add(1., self.f, out=self.f)
def time_float_add_temp(self):
1. + self.f + 1.
def time_double_add(self):
np.add(self.d, 1., out=self.d)
np.add(1., self.d, out=self.d)
def time_double_add_temp(self):
1. + self.d + 1.
class CustomScalar(Benchmark):
params = [np.float32, np.float64]
param_names = ['dtype']
def setup(self, dtype):
self.d = np.ones(20000, dtype=dtype)
def time_add_scalar2(self, dtype):
np.add(self.d, 1)
def time_divide_scalar2(self, dtype):
np.divide(self.d, 1)
def time_divide_scalar2_inplace(self, dtype):
np.divide(self.d, 1, out=self.d)
def time_less_than_scalar2(self, dtype):
(self.d < 1)
class Scalar(Benchmark):
def setup(self):
self.x = np.asarray(1.0)
self.y = np.asarray((1.0 + 1j))
self.z = complex(1.0, 1.0)
def time_add_scalar(self):
(self.x + self.x)
def time_add_scalar_conv(self):
(self.x + 1.0)
def time_add_scalar_conv_complex(self):
(self.y + self.z)
class ArgPack(object):
__slots__ = ['args', 'kwargs']
def __init__(self, *args, **kwargs):
self.args = args
self.kwargs = kwargs
def __repr__(self):
return '({})'.format(', '.join(
[repr(a) for a in self.args] +
['{}={}'.format(k, repr(v)) for k, v in self.kwargs.items()]
))
class ArgParsing(Benchmark):
# In order to benchmark the speed of argument parsing, all but the
# out arguments are chosen such that they have no effect on the
# calculation. In particular, subok=True and where=True are
# defaults, and the dtype is the correct one (the latter will
# still have some effect on the search for the correct inner loop).
x = np.array(1.)
y = np.array(2.)
out = np.array(3.)
param_names = ['arg_kwarg']
params = [[
ArgPack(x, y),
ArgPack(x, y, out),
ArgPack(x, y, out=out),
ArgPack(x, y, out=(out,)),
ArgPack(x, y, out=out, subok=True, where=True),
ArgPack(x, y, subok=True),
ArgPack(x, y, subok=True, where=True),
ArgPack(x, y, out, subok=True, where=True)
]]
def time_add_arg_parsing(self, arg_pack):
np.add(*arg_pack.args, **arg_pack.kwargs)
class ArgParsingReduce(Benchmark):
# In order to benchmark the speed of argument parsing, all but the
# out arguments are chosen such that they have minimal effect on the
# calculation.
a = np.arange(2.)
out = np.array(0.)
param_names = ['arg_kwarg']
params = [[
ArgPack(a,),
ArgPack(a, 0),
ArgPack(a, axis=0),
ArgPack(a, 0, None),
ArgPack(a, axis=0, dtype=None),
ArgPack(a, 0, None, out),
ArgPack(a, axis=0, dtype=None, out=out),
ArgPack(a, out=out)
]]
def time_add_reduce_arg_parsing(self, arg_pack):
np.add.reduce(*arg_pack.args, **arg_pack.kwargs)
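# --- illustration (not part of the original benchmark file) ---
# UFunc.setup builds one argument tuple per dtype square (length f.nin) and
# silently skips dtype/ufunc combinations that raise TypeError, e.g. bitwise
# operations on floating-point inputs.
if __name__ == "__main__":
    assert np.add.nin == 2        # binary ufunc -> argument tuple of length 2
    assert np.negative.nin == 1   # unary ufunc  -> argument tuple of length 1
    try:
        np.bitwise_and(np.ones(3), np.ones(3))   # float inputs are rejected
    except TypeError:
        pass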
| {
"repo_name": "shoyer/numpy",
"path": "benchmarks/benchmarks/bench_ufunc.py",
"copies": "8",
"size": "6252",
"license": "bsd-3-clause",
"hash": -2752458587807895000,
"line_mean": 28.6303317536,
"line_max": 83,
"alpha_frac": 0.5577415227,
"autogenerated": false,
"ratio": 3.133834586466165,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.7691576109166166,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from .common import Benchmark, get_squares_
import numpy as np
ufuncs = ['abs', 'absolute', 'add', 'arccos', 'arccosh', 'arcsin',
'arcsinh', 'arctan', 'arctan2', 'arctanh', 'bitwise_and',
'bitwise_not', 'bitwise_or', 'bitwise_xor', 'cbrt', 'ceil',
'conj', 'conjugate', 'copysign', 'cos', 'cosh', 'deg2rad',
'degrees', 'divide', 'equal', 'exp', 'exp2', 'expm1',
'fabs', 'floor', 'floor_divide', 'fmax', 'fmin', 'fmod',
'frexp', 'greater', 'greater_equal', 'hypot', 'invert',
'isfinite', 'isinf', 'isnan', 'ldexp', 'left_shift', 'less',
'less_equal', 'log', 'log10', 'log1p', 'log2', 'logaddexp',
'logaddexp2', 'logical_and', 'logical_not', 'logical_or',
'logical_xor', 'maximum', 'minimum', 'mod', 'modf',
'multiply', 'negative', 'nextafter', 'not_equal', 'power',
'rad2deg', 'radians', 'reciprocal', 'remainder',
'right_shift', 'rint', 'sign', 'signbit', 'sin', 'sinh',
'spacing', 'sqrt', 'square', 'subtract', 'tan', 'tanh',
'true_divide', 'trunc']
for name in dir(np):
if isinstance(getattr(np, name, None), np.ufunc) and name not in ufuncs:
print("Missing ufunc %r" % (name,))
class Broadcast(Benchmark):
def setup(self):
self.d = np.ones((50000, 100), dtype=np.float64)
self.e = np.ones((100,), dtype=np.float64)
def time_broadcast(self):
self.d - self.e
class UFunc(Benchmark):
params = [ufuncs]
param_names = ['ufunc']
timeout = 10
def setup(self, ufuncname):
np.seterr(all='ignore')
try:
self.f = getattr(np, ufuncname)
except AttributeError:
raise NotImplementedError()
self.args = []
for t, a in get_squares_().items():
arg = (a,) * self.f.nin
try:
self.f(*arg)
except TypeError:
continue
self.args.append(arg)
def time_ufunc_types(self, ufuncname):
[self.f(*arg) for arg in self.args]
class Custom(Benchmark):
def setup(self):
self.b = np.ones(20000, dtype=bool)
def time_nonzero(self):
np.nonzero(self.b)
def time_not_bool(self):
(~self.b)
def time_and_bool(self):
(self.b & self.b)
def time_or_bool(self):
(self.b | self.b)
class CustomScalar(Benchmark):
params = [np.float32, np.float64]
param_names = ['dtype']
def setup(self, dtype):
self.d = np.ones(20000, dtype=dtype)
def time_add_scalar2(self, dtype):
np.add(self.d, 1)
def time_divide_scalar2(self, dtype):
np.divide(self.d, 1)
def time_divide_scalar2_inplace(self, dtype):
np.divide(self.d, 1, out=self.d)
def time_less_than_scalar2(self, dtype):
(self.d < 1)
class Scalar(Benchmark):
def setup(self):
self.x = np.asarray(1.0)
self.y = np.asarray((1.0 + 1j))
self.z = complex(1.0, 1.0)
def time_add_scalar(self):
(self.x + self.x)
def time_add_scalar_conv(self):
(self.x + 1.0)
def time_add_scalar_conv_complex(self):
(self.y + self.z)
| {
"repo_name": "kiwifb/numpy",
"path": "benchmarks/benchmarks/bench_ufunc.py",
"copies": "1",
"size": "3263",
"license": "bsd-3-clause",
"hash": -5798513652703011000,
"line_mean": 27.8761061947,
"line_max": 76,
"alpha_frac": 0.551333129,
"autogenerated": false,
"ratio": 3.217948717948718,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4269281846948718,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from .common import Benchmark
import numpy as np
from numpy.lib import NumpyVersion
class Random(Benchmark):
params = ['normal', 'uniform', 'weibull 1', 'binomial 10 0.5',
'poisson 10']
def setup(self, name):
items = name.split()
name = items.pop(0)
params = [float(x) for x in items]
self.func = getattr(np.random, name)
self.params = tuple(params) + ((100, 100),)
def time_rng(self, name):
self.func(*self.params)
class Shuffle(Benchmark):
def setup(self):
self.a = np.arange(100000)
def time_100000(self):
np.random.shuffle(self.a)
class Randint(Benchmark):
def time_randint_fast(self):
"""Compare to uint32 below"""
np.random.randint(0, 2**30, size=10**5)
def time_randint_slow(self):
"""Compare to uint32 below"""
np.random.randint(0, 2**30 + 1, size=10**5)
class Randint_dtype(Benchmark):
high = {
'bool': 1,
'uint8': 2**7,
'uint16': 2**15,
'uint32': 2**31,
'uint64': 2**63
}
param_names = ['dtype']
params = ['bool', 'uint8', 'uint16', 'uint32', 'uint64']
def setup(self, name):
if NumpyVersion(np.__version__) < '1.11.0.dev0':
raise NotImplementedError
def time_randint_fast(self, name):
high = self.high[name]
np.random.randint(0, high, size=10**5, dtype=name)
def time_randint_slow(self, name):
high = self.high[name]
np.random.randint(0, high + 1, size=10**5, dtype=name)
| {
"repo_name": "maniteja123/numpy",
"path": "benchmarks/benchmarks/bench_random.py",
"copies": "25",
"size": "1631",
"license": "bsd-3-clause",
"hash": -6777461613499908000,
"line_mean": 23.3432835821,
"line_max": 66,
"alpha_frac": 0.5744941754,
"autogenerated": false,
"ratio": 3.349075975359343,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from .common import Benchmark
import numpy as np
from numpy.random import RandomState
try:
from numpy.random import Generator
except ImportError:
pass
class Random(Benchmark):
params = ['normal', 'uniform', 'weibull 1', 'binomial 10 0.5',
'poisson 10']
def setup(self, name):
items = name.split()
name = items.pop(0)
params = [float(x) for x in items]
self.func = getattr(np.random, name)
self.params = tuple(params) + ((100, 100),)
def time_rng(self, name):
self.func(*self.params)
class Shuffle(Benchmark):
def setup(self):
self.a = np.arange(100000)
def time_100000(self):
np.random.shuffle(self.a)
class Randint(Benchmark):
def time_randint_fast(self):
"""Compare to uint32 below"""
np.random.randint(0, 2**30, size=10**5)
def time_randint_slow(self):
"""Compare to uint32 below"""
np.random.randint(0, 2**30 + 1, size=10**5)
class Randint_dtype(Benchmark):
high = {
'bool': 1,
'uint8': 2**7,
'uint16': 2**15,
'uint32': 2**31,
'uint64': 2**63
}
param_names = ['dtype']
params = ['bool', 'uint8', 'uint16', 'uint32', 'uint64']
def setup(self, name):
from numpy.lib import NumpyVersion
if NumpyVersion(np.__version__) < '1.11.0.dev0':
raise NotImplementedError
def time_randint_fast(self, name):
high = self.high[name]
np.random.randint(0, high, size=10**5, dtype=name)
def time_randint_slow(self, name):
high = self.high[name]
np.random.randint(0, high + 1, size=10**5, dtype=name)
class Permutation(Benchmark):
def setup(self):
self.n = 10000
self.a_1d = np.random.random(self.n)
self.a_2d = np.random.random((self.n, 2))
def time_permutation_1d(self):
np.random.permutation(self.a_1d)
def time_permutation_2d(self):
np.random.permutation(self.a_2d)
def time_permutation_int(self):
np.random.permutation(self.n)
nom_size = 100000
class RNG(Benchmark):
param_names = ['rng']
params = ['PCG64', 'MT19937', 'Philox', 'SFC64', 'numpy']
def setup(self, bitgen):
if bitgen == 'numpy':
self.rg = np.random.RandomState()
else:
self.rg = Generator(getattr(np.random, bitgen)())
self.rg.random()
self.int32info = np.iinfo(np.int32)
self.uint32info = np.iinfo(np.uint32)
self.uint64info = np.iinfo(np.uint64)
def time_raw(self, bitgen):
if bitgen == 'numpy':
self.rg.random_integers(self.int32info.max, size=nom_size)
else:
self.rg.integers(self.int32info.max, size=nom_size, endpoint=True)
def time_32bit(self, bitgen):
min, max = self.uint32info.min, self.uint32info.max
if bitgen == 'numpy':
self.rg.randint(min, max + 1, nom_size, dtype=np.uint32)
else:
self.rg.integers(min, max + 1, nom_size, dtype=np.uint32)
def time_64bit(self, bitgen):
min, max = self.uint64info.min, self.uint64info.max
if bitgen == 'numpy':
self.rg.randint(min, max + 1, nom_size, dtype=np.uint64)
else:
self.rg.integers(min, max + 1, nom_size, dtype=np.uint64)
def time_normal_zig(self, bitgen):
self.rg.standard_normal(nom_size)
class Bounded(Benchmark):
u8 = np.uint8
u16 = np.uint16
u32 = np.uint32
u64 = np.uint64
param_names = ['rng', 'dt_max']
params = [['PCG64', 'MT19937', 'Philox', 'SFC64', 'numpy'],
[[u8, 95],
[u8, 64], # Worst case for legacy
[u8, 127], # Best case for legacy
[u16, 95],
[u16, 1024], # Worst case for legacy
[u16, 1535], # Typ. avg. case for legacy
[u16, 2047], # Best case for legacy
[u32, 1024], # Worst case for legacy
[u32, 1535], # Typ. avg. case for legacy
[u32, 2047], # Best case for legacy
[u64, 95],
[u64, 1024], # Worst case for legacy
[u64, 1535], # Typ. avg. case for legacy
[u64, 2047], # Best case for legacy
]]
def setup(self, bitgen, args):
if bitgen == 'numpy':
self.rg = np.random.RandomState()
else:
self.rg = Generator(getattr(np.random, bitgen)())
self.rg.random()
def time_bounded(self, bitgen, args):
"""
Timer for bounded integer values.
Parameters (packed as args)
----------
dt : {uint8, uint16, uint32, uint64}
output dtype
max : int
Upper bound for range. Lower is always 0. Must be <= 2**bits.
"""
dt, max = args
if bitgen == 'numpy':
self.rg.randint(0, max + 1, nom_size, dtype=dt)
else:
self.rg.integers(0, max + 1, nom_size, dtype=dt)
class Choice(Benchmark):
params = [1e3, 1e6, 1e8]
def setup(self, v):
self.a = np.arange(v)
self.rng = np.random.default_rng()
def time_legacy_choice(self, v):
np.random.choice(self.a, 1000, replace=False)
def time_choice(self, v):
self.rng.choice(self.a, 1000, replace=False)
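# --- illustration (not part of the original benchmark file) ---
# The RNG/Bounded benchmarks compare the legacy RandomState interface with
# the newer Generator interface, which use different method names for the
# same kind of bounded draw.  A minimal sketch (variable names here are only
# for the sketch):
if __name__ == "__main__":
    legacy = np.random.RandomState(0)
    legacy.randint(0, 2**31 - 1, size=5)         # legacy bounded integers
    try:
        rg = Generator(np.random.PCG64(0))
        rg.integers(0, 2**31 - 1, size=5)        # Generator bounded integers
    except (NameError, AttributeError):
        pass                                     # Generator unavailable on old NumPy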
| {
"repo_name": "pizzathief/numpy",
"path": "benchmarks/benchmarks/bench_random.py",
"copies": "4",
"size": "5499",
"license": "bsd-3-clause",
"hash": -8347855482214384000,
"line_mean": 28.25,
"line_max": 78,
"alpha_frac": 0.5475541007,
"autogenerated": false,
"ratio": 3.3246674727932284,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5872221573493228,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from .common import Benchmark
import numpy as np
from six.moves import xrange
class LaplaceInplace(Benchmark):
params = ['inplace', 'normal']
param_names = ['update']
def setup(self, update):
N = 150
Niter = 1000
dx = 0.1
dy = 0.1
dx2 = (dx * dx)
dy2 = (dy * dy)
def num_update(u, dx2, dy2):
u[1:(-1), 1:(-1)] = ((((u[2:, 1:(-1)] + u[:(-2), 1:(-1)]) * dy2) +
((u[1:(-1), 2:] + u[1:(-1), :(-2)]) * dx2))
/ (2 * (dx2 + dy2)))
def num_inplace(u, dx2, dy2):
tmp = u[:(-2), 1:(-1)].copy()
np.add(tmp, u[2:, 1:(-1)], out=tmp)
np.multiply(tmp, dy2, out=tmp)
tmp2 = u[1:(-1), 2:].copy()
np.add(tmp2, u[1:(-1), :(-2)], out=tmp2)
np.multiply(tmp2, dx2, out=tmp2)
np.add(tmp, tmp2, out=tmp)
np.multiply(tmp, (1.0 / (2.0 * (dx2 + dy2))),
out=u[1:(-1), 1:(-1)])
def laplace(N, Niter=100, func=num_update, args=()):
u = np.zeros([N, N], order='C')
u[0] = 1
for i in range(Niter):
func(u, *args)
return u
func = {'inplace': num_inplace, 'normal': num_update}[update]
def run():
laplace(N, Niter, func, args=(dx2, dy2))
self.run = run
def time_it(self, update):
self.run()
class MaxesOfDots(Benchmark):
def setup(self):
np.random.seed(1)
nsubj = 5
nfeat = 100
ntime = 200
self.arrays = [np.random.normal(size=(ntime, nfeat))
for i in xrange(nsubj)]
def maxes_of_dots(self, arrays):
"""
A magical feature score for each feature in each dataset
:ref:`Haxby et al., Neuron (2011) <HGC+11>`.
If arrays are column-wise zscore-d before computation it
results in characterizing each column in each array with
sum of maximal correlations of that column with columns
in other arrays.
Arrays must agree only on the first dimension.
For numpy it is a joint benchmark of dot products and max()
on a set of arrays.
"""
feature_scores = ([0] * len(arrays))
for (i, sd) in enumerate(arrays):
for (j, sd2) in enumerate(arrays[(i + 1):]):
corr_temp = np.dot(sd.T, sd2)
feature_scores[i] += np.max(corr_temp, axis=1)
feature_scores[((j + i) + 1)] += np.max(corr_temp, axis=0)
return feature_scores
def time_it(self):
self.maxes_of_dots(self.arrays)
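# --- illustration (not part of the original benchmark file) ---
# Shape bookkeeping for MaxesOfDots.maxes_of_dots, derived from setup():
# each array is (ntime, nfeat) = (200, 100), so for a pair (sd, sd2)
#   corr_temp = np.dot(sd.T, sd2)   -> shape (nfeat, nfeat) = (100, 100)
#   np.max(corr_temp, axis=1)       -> one value per feature (column) of sd
#   np.max(corr_temp, axis=0)       -> one value per feature (column) of sd2
# so each entry of feature_scores ends up as a length-nfeat vector.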
| {
"repo_name": "b-carter/numpy",
"path": "benchmarks/benchmarks/bench_app.py",
"copies": "61",
"size": "2746",
"license": "bsd-3-clause",
"hash": 7599924315911336000,
"line_mean": 29.8539325843,
"line_max": 78,
"alpha_frac": 0.4905316824,
"autogenerated": false,
"ratio": 3.3124246079613995,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from .common import Benchmark
import numpy as np
class ArrayCoercionSmall(Benchmark):
# More detailed benchmarks for array coercion,
# some basic benchmarks are in `bench_core.py`.
params = [[range(3), [1], 1, np.array([5], dtype=np.int64), np.int64(5)]]
param_names = ['array_like']
int64 = np.dtype(np.int64)
def time_array_invalid_kwarg(self, array_like):
try:
np.array(array_like, ndmin="not-integer")
except TypeError:
pass
def time_array(self, array_like):
np.array(array_like)
def time_array_dtype_not_kwargs(self, array_like):
np.array(array_like, self.int64)
def time_array_no_copy(self, array_like):
np.array(array_like, copy=False)
def time_array_subok(self, array_like):
np.array(array_like, subok=True)
def time_array_all_kwargs(self, array_like):
np.array(array_like, dtype=self.int64, copy=False, order="F",
subok=False, ndmin=2)
def time_asarray(self, array_like):
np.asarray(array_like)
def time_asarray_dtype(self, array_like):
np.asarray(array_like, dtype=self.int64)
def time_asarray_dtype_order(self, array_like):
np.asarray(array_like, dtype=self.int64, order="F")
def time_asanyarray(self, array_like):
np.asanyarray(array_like)
def time_asanyarray_dtype(self, array_like):
np.asanyarray(array_like, dtype=self.int64)
def time_asanyarray_dtype_order(self, array_like):
np.asanyarray(array_like, dtype=self.int64, order="F")
def time_ascontiguousarray(self, array_like):
np.ascontiguousarray(array_like)
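# --- illustration (not part of the original benchmark file) ---
# The asarray/asanyarray coercions benchmarked above differ in subclass
# handling: asanyarray passes ndarray subclasses through unchanged, while
# asarray always returns a base ndarray.
if __name__ == "__main__":
    m = np.ma.masked_array([1, 2, 3], mask=[0, 1, 0])
    assert type(np.asanyarray(m)) is np.ma.MaskedArray
    assert type(np.asarray(m)) is np.ndarray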
| {
"repo_name": "WarrenWeckesser/numpy",
"path": "benchmarks/benchmarks/bench_array_coercion.py",
"copies": "17",
"size": "1705",
"license": "bsd-3-clause",
"hash": 8835328964583589000,
"line_mean": 28.9122807018,
"line_max": 77,
"alpha_frac": 0.6457478006,
"autogenerated": false,
"ratio": 3.2476190476190476,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from .common import Benchmark
import numpy as np
class Bincount(Benchmark):
def setup(self):
self.d = np.arange(80000, dtype=np.intp)
self.e = self.d.astype(np.float64)
def time_bincount(self):
np.bincount(self.d)
def time_weights(self):
np.bincount(self.d, weights=self.e)
class Median(Benchmark):
def setup(self):
self.e = np.arange(10000, dtype=np.float32)
self.o = np.arange(10001, dtype=np.float32)
def time_even(self):
np.median(self.e)
def time_odd(self):
np.median(self.o)
def time_even_inplace(self):
np.median(self.e, overwrite_input=True)
def time_odd_inplace(self):
np.median(self.o, overwrite_input=True)
def time_even_small(self):
np.median(self.e[:500], overwrite_input=True)
def time_odd_small(self):
np.median(self.o[:500], overwrite_input=True)
class Percentile(Benchmark):
def setup(self):
self.e = np.arange(10000, dtype=np.float32)
self.o = np.arange(10001, dtype=np.float32)
def time_quartile(self):
np.percentile(self.e, [25, 75])
def time_percentile(self):
np.percentile(self.e, [25, 35, 55, 65, 75])
class Select(Benchmark):
def setup(self):
self.d = np.arange(20000)
self.e = self.d.copy()
self.cond = [(self.d > 4), (self.d < 2)]
self.cond_large = [(self.d > 4), (self.d < 2)] * 10
def time_select(self):
np.select(self.cond, [self.d, self.e])
def time_select_larger(self):
np.select(self.cond_large, ([self.d, self.e] * 10))
class Sort(Benchmark):
def setup(self):
self.e = np.arange(10000, dtype=np.float32)
self.o = np.arange(10001, dtype=np.float32)
np.random.seed(25)
np.random.shuffle(self.o)
# quicksort implementations can have issues with equal elements
self.equal = np.ones(10000)
self.many_equal = np.sort(np.arange(10000) % 10)
# quicksort median of 3 worst case
self.worst = np.arange(1000000)
x = self.worst
while x.size > 3:
mid = x.size // 2
x[mid], x[-2] = x[-2], x[mid]
x = x[:-2]
def time_sort(self):
np.sort(self.e)
def time_sort_random(self):
np.sort(self.o)
def time_sort_inplace(self):
self.e.sort()
def time_sort_equal(self):
self.equal.sort()
def time_sort_many_equal(self):
self.many_equal.sort()
def time_sort_worst(self):
np.sort(self.worst)
def time_argsort(self):
self.e.argsort()
def time_argsort_random(self):
self.o.argsort()
class Where(Benchmark):
def setup(self):
self.d = np.arange(20000)
self.e = self.d.copy()
self.cond = (self.d > 5000)
def time_1(self):
np.where(self.cond)
def time_2(self):
np.where(self.cond, self.d, self.e)
def time_2_broadcast(self):
np.where(self.cond, self.d, 0)
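# --- illustration (not part of the original benchmark file) ---
# Sort.setup builds self.worst by repeatedly swapping the middle element with
# the second-to-last one and then shrinking the working view by two; per the
# comment above this is intended as an adversarial input for quicksort's
# median-of-3 pivot selection, so time_sort_worst exercises the slow path.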
| {
"repo_name": "behzadnouri/numpy",
"path": "benchmarks/benchmarks/bench_function_base.py",
"copies": "24",
"size": "3086",
"license": "bsd-3-clause",
"hash": -997963448862300000,
"line_mean": 23.4920634921,
"line_max": 71,
"alpha_frac": 0.5836033701,
"autogenerated": false,
"ratio": 3.1266464032421477,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from .common import Benchmark
import numpy as np
class Block(Benchmark):
params = [1, 10, 100]
param_names = ['size']
def setup(self, n):
self.a_2d = np.ones((2 * n, 2 * n))
self.b_1d = np.ones(2 * n)
self.b_2d = 2 * self.a_2d
self.a = np.ones(3 * n)
self.b = np.ones(3 * n)
self.one_2d = np.ones((1 * n, 3 * n))
self.two_2d = np.ones((1 * n, 3 * n))
self.three_2d = np.ones((1 * n, 6 * n))
self.four_1d = np.ones(6 * n)
self.five_0d = np.ones(1 * n)
self.six_1d = np.ones(5 * n)
# avoid np.zeros's lazy allocation that might cause
# page faults during benchmark
self.zero_2d = np.full((2 * n, 6 * n), 0)
self.one = np.ones(3 * n)
self.two = 2 * np.ones((3, 3 * n))
self.three = 3 * np.ones(3 * n)
self.four = 4 * np.ones(3 * n)
self.five = 5 * np.ones(1 * n)
self.six = 6 * np.ones(5 * n)
# avoid np.zeros's lazy allocation that might cause
# page faults during benchmark
self.zero = np.full((2 * n, 6 * n), 0)
def time_block_simple_row_wise(self, n):
np.block([self.a_2d, self.b_2d])
def time_block_simple_column_wise(self, n):
np.block([[self.a_2d], [self.b_2d]])
def time_block_complicated(self, n):
np.block([[self.one_2d, self.two_2d],
[self.three_2d],
[self.four_1d],
[self.five_0d, self.six_1d],
[self.zero_2d]])
def time_nested(self, n):
np.block([
[
np.block([
[self.one],
[self.three],
[self.four]
]),
self.two
],
[self.five, self.six],
[self.zero]
])
def time_no_lists(self, n):
np.block(1)
np.block(np.eye(3 * n))
class Block2D(Benchmark):
params = [[(16, 16), (32, 32), (64, 64), (128, 128), (256, 256), (512, 512), (1024, 1024)],
['uint8', 'uint16', 'uint32', 'uint64'],
[(2, 2), (4, 4)]]
param_names = ['shape', 'dtype', 'n_chunks']
def setup(self, shape, dtype, n_chunks):
self.block_list = [
[np.full(shape=[s//n_chunk for s, n_chunk in zip(shape, n_chunks)],
fill_value=1, dtype=dtype) for _ in range(n_chunks[1])]
for _ in range(n_chunks[0])
]
def time_block2d(self, shape, dtype, n_chunks):
np.block(self.block_list)
class Block3D(Benchmark):
"""This benchmark concatenates an array of size ``(5n)^3``"""
# Having copy as a `mode` of the block3D
# allows us to directly compare the benchmark of block
# to that of a direct memory copy into new buffers with
# the ASV framework.
# block and copy will be plotted on the same graph
# as opposed to being displayed as separate benchmarks
params = [[1, 10, 100],
['block', 'copy']]
param_names = ['n', 'mode']
def setup(self, n, mode):
# Slow setup method: hence separated from the others above
self.a000 = np.ones((2 * n, 2 * n, 2 * n), int) * 1
self.a100 = np.ones((3 * n, 2 * n, 2 * n), int) * 2
self.a010 = np.ones((2 * n, 3 * n, 2 * n), int) * 3
self.a001 = np.ones((2 * n, 2 * n, 3 * n), int) * 4
self.a011 = np.ones((2 * n, 3 * n, 3 * n), int) * 5
self.a101 = np.ones((3 * n, 2 * n, 3 * n), int) * 6
self.a110 = np.ones((3 * n, 3 * n, 2 * n), int) * 7
self.a111 = np.ones((3 * n, 3 * n, 3 * n), int) * 8
self.block = [
[
[self.a000, self.a001],
[self.a010, self.a011],
],
[
[self.a100, self.a101],
[self.a110, self.a111],
]
]
self.arr_list = [a
for two_d in self.block
for one_d in two_d
for a in one_d]
def time_3d(self, n, mode):
if mode == 'block':
np.block(self.block)
else: # mode == 'copy'
[arr.copy() for arr in self.arr_list]
# Retain old benchmark name for backward compat
time_3d.benchmark_name = "bench_shape_base.Block.time_3d"
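# --- illustration (not part of the original benchmark file) ---
# A minimal sketch of what np.block assembles in the simple row- and
# column-wise cases benchmarked above.
if __name__ == "__main__":
    a = np.ones((2, 2))
    b = 2 * a
    assert np.block([a, b]).shape == (2, 4)      # row-wise concatenation
    assert np.block([[a], [b]]).shape == (4, 2)  # column-wise concatenation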
| {
"repo_name": "gfyoung/numpy",
"path": "benchmarks/benchmarks/bench_shape_base.py",
"copies": "10",
"size": "4406",
"license": "bsd-3-clause",
"hash": -4505995233329370600,
"line_mean": 30.9275362319,
"line_max": 95,
"alpha_frac": 0.4809350885,
"autogenerated": false,
"ratio": 3.127040454222853,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8607975542722853,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from .common import Benchmark
import numpy as np
class Core(Benchmark):
def setup(self):
self.l100 = range(100)
self.l50 = range(50)
self.l = [np.arange(1000), np.arange(1000)]
self.l10x10 = np.ones((10, 10))
def time_array_1(self):
np.array(1)
def time_array_empty(self):
np.array([])
def time_array_l1(self):
np.array([1])
def time_array_l100(self):
np.array(self.l100)
def time_array_l(self):
np.array(self.l)
def time_vstack_l(self):
np.vstack(self.l)
def time_hstack_l(self):
np.hstack(self.l)
def time_dstack_l(self):
np.dstack(self.l)
def time_arange_100(self):
np.arange(100)
def time_zeros_100(self):
np.zeros(100)
def time_ones_100(self):
np.ones(100)
def time_empty_100(self):
np.empty(100)
def time_eye_100(self):
np.eye(100)
def time_identity_100(self):
np.identity(100)
def time_eye_3000(self):
np.eye(3000)
def time_identity_3000(self):
np.identity(3000)
def time_diag_l100(self):
np.diag(self.l100)
def time_diagflat_l100(self):
np.diagflat(self.l100)
def time_diagflat_l50_l50(self):
np.diagflat([self.l50, self.l50])
def time_triu_l10x10(self):
np.triu(self.l10x10)
def time_tril_l10x10(self):
np.tril(self.l10x10)
class Temporaries(Benchmark):
def setup(self):
self.amid = np.ones(50000)
self.bmid = np.ones(50000)
self.alarge = np.ones(1000000)
self.blarge = np.ones(1000000)
def time_mid(self):
(self.amid * 2) + self.bmid
def time_mid2(self):
(self.amid + self.bmid) - 2
def time_large(self):
(self.alarge * 2) + self.blarge
def time_large2(self):
(self.alarge + self.blarge) - 2
class CorrConv(Benchmark):
params = [[50, 1000, int(1e5)],
[10, 100, 1000, int(1e4)],
['valid', 'same', 'full']]
param_names = ['size1', 'size2', 'mode']
def setup(self, size1, size2, mode):
self.x1 = np.linspace(0, 1, num=size1)
self.x2 = np.cos(np.linspace(0, 2*np.pi, num=size2))
def time_correlate(self, size1, size2, mode):
np.correlate(self.x1, self.x2, mode=mode)
def time_convolve(self, size1, size2, mode):
np.convolve(self.x1, self.x2, mode=mode)
class CountNonzero(Benchmark):
param_names = ['numaxes', 'size', 'dtype']
params = [
[1, 2, 3],
[100, 10000, 1000000],
[bool, int, str, object]
]
def setup(self, numaxes, size, dtype):
self.x = np.arange(numaxes * size).reshape(numaxes, size)
self.x = (self.x % 3).astype(dtype)
def time_count_nonzero(self, numaxes, size, dtype):
np.count_nonzero(self.x)
def time_count_nonzero_axis(self, numaxes, size, dtype):
np.count_nonzero(self.x, axis=self.x.ndim - 1)
def time_count_nonzero_multi_axis(self, numaxes, size, dtype):
if self.x.ndim >= 2:
np.count_nonzero(self.x, axis=(
self.x.ndim - 1, self.x.ndim - 2))
class PackBits(Benchmark):
param_names = ['dtype']
params = [[bool, np.uintp]]
def setup(self, dtype):
self.d = np.ones(10000, dtype=dtype)
self.d2 = np.ones((200, 1000), dtype=dtype)
def time_packbits(self, dtype):
np.packbits(self.d)
def time_packbits_axis0(self, dtype):
np.packbits(self.d2, axis=0)
def time_packbits_axis1(self, dtype):
np.packbits(self.d2, axis=1)
class UnpackBits(Benchmark):
def setup(self):
self.d = np.ones(10000, dtype=np.uint8)
self.d2 = np.ones((200, 1000), dtype=np.uint8)
def time_unpackbits(self):
np.unpackbits(self.d)
def time_unpackbits_axis0(self):
np.unpackbits(self.d2, axis=0)
def time_unpackbits_axis1(self):
np.unpackbits(self.d2, axis=1)
class Indices(Benchmark):
def time_indices(self):
np.indices((1000, 500))
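# --- illustration (not part of the original benchmark file) ---
# packbits/unpackbits round-trip, as exercised by PackBits/UnpackBits above:
# eight boolean flags pack into one uint8, most-significant bit first.
if __name__ == "__main__":
    bits = np.array([1, 1, 0, 0, 0, 0, 0, 0], dtype=bool)
    packed = np.packbits(bits)
    assert packed.tolist() == [192]              # 0b11000000
    assert np.array_equal(np.unpackbits(packed), bits.astype(np.uint8))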
| {
"repo_name": "shoyer/numpy",
"path": "benchmarks/benchmarks/bench_core.py",
"copies": "4",
"size": "4147",
"license": "bsd-3-clause",
"hash": 6883988163029921000,
"line_mean": 22.8333333333,
"line_max": 66,
"alpha_frac": 0.5813841331,
"autogenerated": false,
"ratio": 2.968503937007874,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0003157761778451434,
"num_lines": 174
} |
from __future__ import absolute_import, division, print_function
from .common import Benchmark
import numpy as np
class Histogram1D(Benchmark):
def setup(self):
self.d = np.linspace(0, 100, 100000)
def time_full_coverage(self):
np.histogram(self.d, 200, (0, 100))
def time_small_coverage(self):
np.histogram(self.d, 200, (50, 51))
def time_fine_binning(self):
np.histogram(self.d, 10000, (0, 100))
class Histogram2D(Benchmark):
def setup(self):
self.d = np.linspace(0, 100, 200000).reshape((-1,2))
def time_full_coverage(self):
np.histogramdd(self.d, (200, 200), ((0, 100), (0, 100)))
def time_small_coverage(self):
np.histogramdd(self.d, (200, 200), ((50, 51), (50, 51)))
def time_fine_binning(self):
np.histogramdd(self.d, (10000, 10000), ((0, 100), (0, 100)))
class Bincount(Benchmark):
def setup(self):
self.d = np.arange(80000, dtype=np.intp)
self.e = self.d.astype(np.float64)
def time_bincount(self):
np.bincount(self.d)
def time_weights(self):
np.bincount(self.d, weights=self.e)
class Median(Benchmark):
def setup(self):
self.e = np.arange(10000, dtype=np.float32)
self.o = np.arange(10001, dtype=np.float32)
def time_even(self):
np.median(self.e)
def time_odd(self):
np.median(self.o)
def time_even_inplace(self):
np.median(self.e, overwrite_input=True)
def time_odd_inplace(self):
np.median(self.o, overwrite_input=True)
def time_even_small(self):
np.median(self.e[:500], overwrite_input=True)
def time_odd_small(self):
np.median(self.o[:500], overwrite_input=True)
class Percentile(Benchmark):
def setup(self):
self.e = np.arange(10000, dtype=np.float32)
self.o = np.arange(10001, dtype=np.float32)
def time_quartile(self):
np.percentile(self.e, [25, 75])
def time_percentile(self):
np.percentile(self.e, [25, 35, 55, 65, 75])
class Select(Benchmark):
def setup(self):
self.d = np.arange(20000)
self.e = self.d.copy()
self.cond = [(self.d > 4), (self.d < 2)]
self.cond_large = [(self.d > 4), (self.d < 2)] * 10
def time_select(self):
np.select(self.cond, [self.d, self.e])
def time_select_larger(self):
np.select(self.cond_large, ([self.d, self.e] * 10))
class SortGenerator(object):
# The size of the unsorted area in the "random unsorted area"
# benchmarks
AREA_SIZE = 100
# The size of the "partially ordered" sub-arrays
BUBBLE_SIZE = 100
@staticmethod
def random(size, dtype):
"""
Returns a randomly-shuffled array.
"""
arr = np.arange(size, dtype=dtype)
np.random.shuffle(arr)
return arr
@staticmethod
def ordered(size, dtype):
"""
Returns an ordered array.
"""
return np.arange(size, dtype=dtype)
@staticmethod
def reversed(size, dtype):
"""
Returns an array that's in descending order.
"""
return np.arange(size-1, -1, -1, dtype=dtype)
@staticmethod
def uniform(size, dtype):
"""
Returns an array that has the same value everywhere.
"""
return np.ones(size, dtype=dtype)
@staticmethod
def swapped_pair(size, dtype, swap_frac):
"""
Returns an ordered array, but one that has ``swap_frac * size``
pairs swapped.
"""
a = np.arange(size, dtype=dtype)
for _ in range(int(size * swap_frac)):
x, y = np.random.randint(0, size, 2)
a[x], a[y] = a[y], a[x]
return a
@staticmethod
def sorted_block(size, dtype, block_size):
"""
Returns an array with blocks that are all sorted.
"""
a = np.arange(size, dtype=dtype)
b = []
if size < block_size:
return a
block_num = size // block_size
for i in range(block_num):
b.extend(a[i::block_num])
return np.array(b)
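    # Example: sorted_block(8, dtype, 4) computes block_num = 2 and interleaves
    # a[0::2] with a[1::2], giving [0, 2, 4, 6, 1, 3, 5, 7] -- two sorted
    # blocks of length 4.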
@classmethod
def random_unsorted_area(cls, size, dtype, frac, area_size=None):
"""
This type of array has random unsorted areas such that they
compose the fraction ``frac`` of the original array.
"""
if area_size is None:
area_size = cls.AREA_SIZE
area_num = int(size * frac / area_size)
a = np.arange(size, dtype=dtype)
for _ in range(area_num):
start = np.random.randint(size-area_size)
end = start + area_size
np.random.shuffle(a[start:end])
return a
@classmethod
def random_bubble(cls, size, dtype, bubble_num, bubble_size=None):
"""
This type of array has ``bubble_num`` random unsorted areas.
"""
if bubble_size is None:
bubble_size = cls.BUBBLE_SIZE
frac = bubble_size * bubble_num / size
return cls.random_unsorted_area(size, dtype, frac, bubble_size)
class Sort(Benchmark):
"""
This benchmark tests sorting performance with several
different types of arrays that are likely to appear in
real-world applications.
"""
params = [
# In NumPy 1.17 and newer, 'merge' can be one of several
        # stable sorts; it isn't necessarily merge sort.
['quick', 'merge', 'heap'],
['float64', 'int64', 'uint64'],
[
('random',),
('ordered',),
('reversed',),
('uniform',),
('sorted_block', 10),
('sorted_block', 100),
('sorted_block', 1000),
('swapped_pair', 0.01),
('swapped_pair', 0.1),
('swapped_pair', 0.5),
('random_unsorted_area', 0.5),
('random_unsorted_area', 0.1),
('random_unsorted_area', 0.01),
('random_bubble', 1),
('random_bubble', 5),
('random_bubble', 10),
],
]
param_names = ['kind', 'dtype', 'array_type']
# The size of the benchmarked arrays.
ARRAY_SIZE = 10000
def setup(self, kind, dtype, array_type):
np.random.seed(1234)
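        # array_type is a tuple such as ('sorted_block', 10): the first item
        # names a SortGenerator classmethod and any remaining items are passed
        # through, e.g. SortGenerator.sorted_block(self.ARRAY_SIZE, dtype, 10).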
array_class = array_type[0]
self.arr = getattr(SortGenerator, array_class)(self.ARRAY_SIZE, dtype, *array_type[1:])
def time_sort_inplace(self, kind, dtype, array_type):
self.arr.sort(kind=kind)
def time_sort(self, kind, dtype, array_type):
np.sort(self.arr, kind=kind)
def time_argsort(self, kind, dtype, array_type):
np.argsort(self.arr, kind=kind)
class SortWorst(Benchmark):
def setup(self):
# quicksort median of 3 worst case
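        # Repeatedly swapping the middle element with the second-to-last and
        # shrinking the window by two builds an adversarial input for
        # median-of-3 pivot selection, pushing quicksort toward O(n^2).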
self.worst = np.arange(1000000)
x = self.worst
while x.size > 3:
mid = x.size // 2
x[mid], x[-2] = x[-2], x[mid]
x = x[:-2]
def time_sort_worst(self):
np.sort(self.worst)
    # Retain old benchmark name for backward compatibility
time_sort_worst.benchmark_name = "bench_function_base.Sort.time_sort_worst"
class Where(Benchmark):
def setup(self):
self.d = np.arange(20000)
self.e = self.d.copy()
self.cond = (self.d > 5000)
def time_1(self):
np.where(self.cond)
def time_2(self):
np.where(self.cond, self.d, self.e)
def time_2_broadcast(self):
np.where(self.cond, self.d, 0)
| {
"repo_name": "shoyer/numpy",
"path": "benchmarks/benchmarks/bench_function_base.py",
"copies": "1",
"size": "7481",
"license": "bsd-3-clause",
"hash": 7916582763484677000,
"line_mean": 26.7074074074,
"line_max": 95,
"alpha_frac": 0.564764069,
"autogenerated": false,
"ratio": 3.4379595588235294,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9498523440614463,
"avg_score": 0.0008400374418133981,
"num_lines": 270
} |
from __future__ import absolute_import, division, print_function
from .common import Benchmark
import numpy as np
class LaplaceInplace(Benchmark):
params = ['inplace', 'normal']
param_names = ['update']
def setup(self, update):
N = 150
Niter = 1000
dx = 0.1
dy = 0.1
dx2 = (dx * dx)
dy2 = (dy * dy)
def num_update(u, dx2, dy2):
u[1:(-1), 1:(-1)] = ((((u[2:, 1:(-1)] + u[:(-2), 1:(-1)]) * dy2) +
((u[1:(-1), 2:] + u[1:(-1), :(-2)]) * dx2))
/ (2 * (dx2 + dy2)))
def num_inplace(u, dx2, dy2):
tmp = u[:(-2), 1:(-1)].copy()
np.add(tmp, u[2:, 1:(-1)], out=tmp)
np.multiply(tmp, dy2, out=tmp)
tmp2 = u[1:(-1), 2:].copy()
np.add(tmp2, u[1:(-1), :(-2)], out=tmp2)
np.multiply(tmp2, dx2, out=tmp2)
np.add(tmp, tmp2, out=tmp)
np.multiply(tmp, (1.0 / (2.0 * (dx2 + dy2))),
out=u[1:(-1), 1:(-1)])
def laplace(N, Niter=100, func=num_update, args=()):
u = np.zeros([N, N], order='C')
u[0] = 1
for i in range(Niter):
func(u, *args)
return u
func = {'inplace': num_inplace, 'normal': num_update}[update]
def run():
laplace(N, Niter, func, args=(dx2, dy2))
self.run = run
def time_it(self, update):
self.run()
class MaxesOfDots(Benchmark):
def setup(self):
np.random.seed(1)
nsubj = 5
nfeat = 100
ntime = 200
self.arrays = [np.random.normal(size=(ntime, nfeat))
                       for i in range(nsubj)]
def maxes_of_dots(self, arrays):
"""
A magical feature score for each feature in each dataset
:ref:`Haxby et al., Neuron (2011) <HGC+11>`.
If arrays are column-wise zscore-d before computation it
results in characterizing each column in each array with
sum of maximal correlations of that column with columns
in other arrays.
Arrays must agree only on the first dimension.
        For numpy it is a joint benchmark of dot products and max()
on a set of arrays.
"""
feature_scores = ([0] * len(arrays))
for (i, sd) in enumerate(arrays):
for (j, sd2) in enumerate(arrays[(i + 1):]):
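                # j enumerates arrays[i+1:], so sd2 is arrays[i + 1 + j]; the
                # scores of both arrays[i] and arrays[i + 1 + j] are updated.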
corr_temp = np.dot(sd.T, sd2)
feature_scores[i] += np.max(corr_temp, axis=1)
feature_scores[((j + i) + 1)] += np.max(corr_temp, axis=0)
return feature_scores
def time_it(self):
self.maxes_of_dots(self.arrays)
| {
"repo_name": "ChristopherHogan/numpy",
"path": "benchmarks/benchmarks/bench_app.py",
"copies": "29",
"size": "2716",
"license": "bsd-3-clause",
"hash": -5715242200719790000,
"line_mean": 30.2183908046,
"line_max": 78,
"alpha_frac": 0.4871134021,
"autogenerated": false,
"ratio": 3.30816077953715,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from .common import Benchmark
import numpy as np
class MA(Benchmark):
def setup(self):
self.l100 = range(100)
self.t100 = ([True] * 100)
def time_masked_array(self):
np.ma.masked_array()
def time_masked_array_l100(self):
np.ma.masked_array(self.l100)
def time_masked_array_l100_t100(self):
np.ma.masked_array(self.l100, self.t100)
class Indexing(Benchmark):
param_names = ['masked', 'ndim', 'size']
params = [[True, False],
[1, 2],
[10, 100, 1000]]
def setup(self, masked, ndim, size):
x = np.arange(size**ndim).reshape(ndim * (size,))
if masked:
self.m = np.ma.array(x, mask=x%2 == 0)
else:
self.m = np.ma.array(x)
self.idx_scalar = (size//2,) * ndim
self.idx_0d = (size//2,) * ndim + (Ellipsis,)
self.idx_1d = (size//2,) * (ndim - 1)
def time_scalar(self, masked, ndim, size):
self.m[self.idx_scalar]
def time_0d(self, masked, ndim, size):
self.m[self.idx_0d]
def time_1d(self, masked, ndim, size):
self.m[self.idx_1d]
class UFunc(Benchmark):
param_names = ['a_masked', 'b_masked', 'size']
params = [[True, False],
[True, False],
[10, 100, 1000]]
def setup(self, a_masked, b_masked, size):
x = np.arange(size).astype(np.uint8)
self.a_scalar = np.ma.masked if a_masked else 5
self.b_scalar = np.ma.masked if b_masked else 3
self.a_1d = np.ma.array(x, mask=x%2 == 0 if a_masked else np.ma.nomask)
self.b_1d = np.ma.array(x, mask=x%3 == 0 if b_masked else np.ma.nomask)
self.a_2d = self.a_1d.reshape(1, -1)
        self.b_2d = self.b_1d.reshape(-1, 1)
def time_scalar(self, a_masked, b_masked, size):
np.ma.add(self.a_scalar, self.b_scalar)
def time_scalar_1d(self, a_masked, b_masked, size):
np.ma.add(self.a_scalar, self.b_1d)
def time_1d(self, a_masked, b_masked, size):
np.ma.add(self.a_1d, self.b_1d)
def time_2d(self, a_masked, b_masked, size):
# broadcasting happens this time
np.ma.add(self.a_2d, self.b_2d)
class Concatenate(Benchmark):
param_names = ['mode', 'n']
params = [
['ndarray', 'unmasked',
'ndarray+masked', 'unmasked+masked',
'masked'],
[2, 100, 2000]
]
def setup(self, mode, n):
        # avoid np.zeros's lazy allocation, which would cause page faults
        # during the benchmark; np.full makes the page faults happen during setup.
normal = np.full((n, n), 0, int)
unmasked = np.ma.zeros((n, n), int)
masked = np.ma.array(normal, mask=True)
mode_parts = mode.split('+')
base = mode_parts[0]
promote = 'masked' in mode_parts[1:]
if base == 'ndarray':
args = 10 * (normal,)
elif base == 'unmasked':
args = 10 * (unmasked,)
else:
args = 10 * (masked,)
if promote:
args = args[:-1] + (masked,)
self.args = args
def time_it(self, mode, n):
np.ma.concatenate(self.args)
| {
"repo_name": "gfyoung/numpy",
"path": "benchmarks/benchmarks/bench_ma.py",
"copies": "10",
"size": "3222",
"license": "bsd-3-clause",
"hash": -1295410326680170000,
"line_mean": 27.0173913043,
"line_max": 83,
"alpha_frac": 0.5509000621,
"autogenerated": false,
"ratio": 3.0744274809160306,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0007035030342996158,
"num_lines": 115
} |
from __future__ import absolute_import, division, print_function
from .common import Benchmark
import numpy as np
class Random(Benchmark):
params = ['normal', 'uniform', 'weibull 1', 'binomial 10 0.5',
'poisson 10']
def setup(self, name):
items = name.split()
name = items.pop(0)
params = [float(x) for x in items]
self.func = getattr(np.random, name)
self.params = tuple(params) + ((100, 100),)
def time_rng(self, name):
self.func(*self.params)
class Shuffle(Benchmark):
def setup(self):
self.a = np.arange(100000)
def time_100000(self):
np.random.shuffle(self.a)
class Randint(Benchmark):
def time_randint_fast(self):
"""Compare to uint32 below"""
np.random.randint(0, 2**30, size=10**5)
def time_randint_slow(self):
"""Compare to uint32 below"""
np.random.randint(0, 2**30 + 1, size=10**5)
class Randint_dtype(Benchmark):
high = {
'bool': 1,
'uint8': 2**7,
'uint16': 2**15,
'uint32': 2**31,
'uint64': 2**63
}
param_names = ['dtype']
params = ['bool', 'uint8', 'uint16', 'uint32', 'uint64']
def setup(self, name):
from numpy.lib import NumpyVersion
if NumpyVersion(np.__version__) < '1.11.0.dev0':
raise NotImplementedError
def time_randint_fast(self, name):
high = self.high[name]
np.random.randint(0, high, size=10**5, dtype=name)
def time_randint_slow(self, name):
high = self.high[name]
np.random.randint(0, high + 1, size=10**5, dtype=name)
| {
"repo_name": "solarjoe/numpy",
"path": "benchmarks/benchmarks/bench_random.py",
"copies": "17",
"size": "1639",
"license": "bsd-3-clause",
"hash": 3290632973403042000,
"line_mean": 23.4626865672,
"line_max": 66,
"alpha_frac": 0.5716900549,
"autogenerated": false,
"ratio": 3.3655030800821355,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.014925373134328358,
"num_lines": 67
} |
from __future__ import absolute_import, division, print_function
from .common import Benchmark
import numpy
class Core(Benchmark):
def setup(self):
self.l100 = range(100)
self.l50 = range(50)
self.l = [numpy.arange(1000), numpy.arange(1000)]
self.l10x10 = numpy.ones((10, 10))
def time_array_1(self):
numpy.array(1)
def time_array_empty(self):
numpy.array([])
def time_array_l1(self):
numpy.array([1])
def time_array_l100(self):
numpy.array(self.l100)
def time_array_l(self):
numpy.array(self.l)
def time_vstack_l(self):
numpy.vstack(self.l)
def time_hstack_l(self):
numpy.hstack(self.l)
def time_dstack_l(self):
numpy.dstack(self.l)
def time_arange_100(self):
numpy.arange(100)
def time_zeros_100(self):
numpy.zeros(100)
def time_ones_100(self):
numpy.ones(100)
def time_empty_100(self):
numpy.empty(100)
def time_eye_100(self):
numpy.eye(100)
def time_identity_100(self):
numpy.identity(100)
def time_eye_3000(self):
numpy.eye(3000)
def time_identity_3000(self):
numpy.identity(3000)
def time_diag_l100(self):
numpy.diag(self.l100)
def time_diagflat_l100(self):
numpy.diagflat(self.l100)
def time_diagflat_l50_l50(self):
numpy.diagflat([self.l50, self.l50])
def time_triu_l10x10(self):
numpy.triu(self.l10x10)
def time_tril_l10x10(self):
numpy.tril(self.l10x10)
class MA(Benchmark):
def setup(self):
self.l100 = range(100)
self.t100 = ([True] * 100)
def time_masked_array(self):
numpy.ma.masked_array()
def time_masked_array_l100(self):
numpy.ma.masked_array(self.l100)
def time_masked_array_l100_t100(self):
numpy.ma.masked_array(self.l100, self.t100)
| {
"repo_name": "cjermain/numpy",
"path": "benchmarks/benchmarks/bench_core.py",
"copies": "39",
"size": "1929",
"license": "bsd-3-clause",
"hash": 9178732393563715000,
"line_mean": 20.1978021978,
"line_max": 64,
"alpha_frac": 0.5992742354,
"autogenerated": false,
"ratio": 3.106280193236715,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00018946570670708602,
"num_lines": 91
} |
from __future__ import absolute_import, division, print_function
from .common import Benchmark
try:
from numpy.core.overrides import array_function_dispatch
except ImportError:
# Don't fail at import time with old Numpy versions
def array_function_dispatch(*args, **kwargs):
def wrap(*args, **kwargs):
return None
return wrap
import numpy as np
def _broadcast_to_dispatcher(array, shape, subok=None):
return (array,)
@array_function_dispatch(_broadcast_to_dispatcher)
def mock_broadcast_to(array, shape, subok=False):
pass
def _concatenate_dispatcher(arrays, axis=None, out=None):
if out is not None:
arrays = list(arrays)
arrays.append(out)
return arrays
@array_function_dispatch(_concatenate_dispatcher)
def mock_concatenate(arrays, axis=0, out=None):
pass
class DuckArray(object):
def __array_function__(self, func, types, args, kwargs):
pass
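# DuckArray's __array_function__ is a no-op, so the ArrayFunction benchmarks
# below measure only the cost of __array_function__ dispatch, not any actual
# array computation.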
class ArrayFunction(Benchmark):
def setup(self):
self.numpy_array = np.array(1)
self.numpy_arrays = [np.array(1), np.array(2)]
self.many_arrays = 500 * self.numpy_arrays
self.duck_array = DuckArray()
self.duck_arrays = [DuckArray(), DuckArray()]
self.mixed_arrays = [np.array(1), DuckArray()]
def time_mock_broadcast_to_numpy(self):
mock_broadcast_to(self.numpy_array, ())
def time_mock_broadcast_to_duck(self):
mock_broadcast_to(self.duck_array, ())
def time_mock_concatenate_numpy(self):
mock_concatenate(self.numpy_arrays, axis=0)
def time_mock_concatenate_many(self):
mock_concatenate(self.many_arrays, axis=0)
def time_mock_concatenate_duck(self):
mock_concatenate(self.duck_arrays, axis=0)
def time_mock_concatenate_mixed(self):
mock_concatenate(self.mixed_arrays, axis=0)
| {
"repo_name": "shoyer/numpy",
"path": "benchmarks/benchmarks/bench_overrides.py",
"copies": "8",
"size": "1859",
"license": "bsd-3-clause",
"hash": 8798350175554663000,
"line_mean": 25.9420289855,
"line_max": 64,
"alpha_frac": 0.6691769769,
"autogenerated": false,
"ratio": 3.5477099236641223,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8216886900564123,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from .common import Benchmark, squares_, indexes_, indexes_rand_
import sys
import six
from numpy import memmap, float32, array
import numpy as np
class Indexing(Benchmark):
params = [["indexes_", "indexes_rand_"],
['I', ':,I', 'np.ix_(I, I)'],
['', '=1']]
param_names = ['indexes', 'sel', 'op']
def setup(self, indexes, sel, op):
sel = sel.replace('I', indexes)
ns = {'squares_': squares_,
'np': np,
'indexes_': indexes_,
'indexes_rand_': indexes_rand_}
if sys.version_info[0] >= 3:
code = "def run():\n for a in squares_.values(): a[%s]%s"
else:
code = "def run():\n for a in squares_.itervalues(): a[%s]%s"
code = code % (sel, op)
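        # For example, indexes='indexes_', sel='np.ix_(I, I)', op='=1' builds
        # (on Python 3):
        #   def run():
        #       for a in squares_.values(): a[np.ix_(indexes_, indexes_)]=1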
six.exec_(code, ns)
self.func = ns['run']
def time_op(self, indexes, sel, op):
self.func()
class IndexingSeparate(Benchmark):
def setup(self):
self.fp = memmap('tmp.dat', dtype=float32, mode='w+', shape=(50, 60))
self.indexes = array([3, 4, 6, 10, 20])
def time_mmap_slicing(self):
for i in range(1000):
self.fp[5:10]
def time_mmap_fancy_indexing(self):
for i in range(1000):
self.fp[self.indexes]
class IndexingStructured0D(Benchmark):
def setup(self):
self.dt = np.dtype([('a', 'f4', 256)])
self.A = np.zeros((), self.dt)
self.B = self.A.copy()
self.a = np.zeros(1, self.dt)[0]
self.b = self.a.copy()
def time_array_slice(self):
self.B['a'][:] = self.A['a']
def time_array_all(self):
self.B['a'] = self.A['a']
def time_scalar_slice(self):
self.b['a'][:] = self.a['a']
def time_scalar_all(self):
self.b['a'] = self.a['a']
| {
"repo_name": "groutr/numpy",
"path": "benchmarks/benchmarks/bench_indexing.py",
"copies": "8",
"size": "1883",
"license": "bsd-3-clause",
"hash": -513901367072153500,
"line_mean": 25.1527777778,
"line_max": 77,
"alpha_frac": 0.5231014339,
"autogenerated": false,
"ratio": 3.2409638554216866,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 72
} |
from __future__ import absolute_import, division, print_function
from .common import Benchmark, squares_, indexes_rand
import numpy as np
class Eindot(Benchmark):
def setup(self):
self.a = np.arange(60000.0).reshape(150, 400)
self.b = np.arange(240000.0).reshape(400, 600)
self.c = np.arange(600)
self.d = np.arange(400)
self.a3 = np.arange(480000.).reshape(60, 80, 100)
self.b3 = np.arange(192000.).reshape(80, 60, 40)
def time_einsum_ij_jk_a_b(self):
np.einsum('ij,jk', self.a, self.b)
def time_dot_a_b(self):
np.dot(self.a, self.b)
def time_einsum_i_ij_j(self):
np.einsum('i,ij,j', self.d, self.b, self.c)
def time_dot_d_dot_b_c(self):
np.dot(self.d, np.dot(self.b, self.c))
def time_einsum_ijk_jil_kl(self):
np.einsum('ijk,jil->kl', self.a3, self.b3)
def time_tensordot_a_b_axes_1_0_0_1(self):
np.tensordot(self.a3, self.b3, axes=([1, 0], [0, 1]))
class Linalg(Benchmark):
params = [['svd', 'pinv', 'det', 'norm'],
list(squares_.keys())]
param_names = ['op', 'type']
def setup(self, op, typename):
np.seterr(all='ignore')
self.func = getattr(np.linalg, op)
if op == 'cholesky':
# we need a positive definite
self.a = np.dot(squares_[typename],
squares_[typename].T)
else:
self.a = squares_[typename]
# check that dtype is supported at all
try:
self.func(self.a[:2, :2])
except TypeError:
raise NotImplementedError()
def time_op(self, op, typename):
self.func(self.a)
class Lstsq(Benchmark):
def setup(self):
self.a = squares_['float64']
self.b = indexes_rand[:100].astype(np.float64)
def time_numpy_linalg_lstsq_a__b_float64(self):
np.linalg.lstsq(self.a, self.b)
| {
"repo_name": "GrimDerp/numpy",
"path": "benchmarks/benchmarks/bench_linalg.py",
"copies": "29",
"size": "1927",
"license": "bsd-3-clause",
"hash": 77917432420441020,
"line_mean": 26.5285714286,
"line_max": 64,
"alpha_frac": 0.5651271406,
"autogenerated": false,
"ratio": 3.0587301587301585,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from .common import Benchmark, squares
import numpy as np
class Copy(Benchmark):
params = ["int8", "int16", "float32", "float64",
"complex64", "complex128"]
param_names = ['type']
def setup(self, typename):
dtype = np.dtype(typename)
self.d = np.arange((50 * 500), dtype=dtype).reshape((500, 50))
self.e = np.arange((50 * 500), dtype=dtype).reshape((50, 500))
self.e_d = self.e.reshape(self.d.shape)
self.dflat = np.arange((50 * 500), dtype=dtype)
def time_memcpy(self, typename):
self.d[...] = self.e_d
def time_cont_assign(self, typename):
self.d[...] = 1
def time_strided_copy(self, typename):
self.d[...] = self.e.T
def time_strided_assign(self, typename):
self.dflat[::2] = 2
class CopyTo(Benchmark):
def setup(self):
self.d = np.ones(50000)
self.e = self.d.copy()
self.m = (self.d == 1)
self.im = (~ self.m)
self.m8 = self.m.copy()
self.m8[::8] = (~ self.m[::8])
self.im8 = (~ self.m8)
def time_copyto(self):
np.copyto(self.d, self.e)
def time_copyto_sparse(self):
np.copyto(self.d, self.e, where=self.m)
def time_copyto_dense(self):
np.copyto(self.d, self.e, where=self.im)
def time_copyto_8_sparse(self):
np.copyto(self.d, self.e, where=self.m8)
def time_copyto_8_dense(self):
np.copyto(self.d, self.e, where=self.im8)
class Savez(Benchmark):
def time_vb_savez_squares(self):
np.savez('tmp.npz', squares)
| {
"repo_name": "moreati/numpy",
"path": "benchmarks/benchmarks/bench_io.py",
"copies": "29",
"size": "1642",
"license": "bsd-3-clause",
"hash": -8933031371171915000,
"line_mean": 25.9180327869,
"line_max": 70,
"alpha_frac": 0.5755176614,
"autogenerated": false,
"ratio": 2.9854545454545454,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from .common import Benchmark, squares_
import numpy as np
ufuncs = ['abs', 'absolute', 'add', 'arccos', 'arccosh', 'arcsin',
'arcsinh', 'arctan', 'arctan2', 'arctanh', 'bitwise_and',
'bitwise_not', 'bitwise_or', 'bitwise_xor', 'cbrt', 'ceil',
'conj', 'conjugate', 'copysign', 'cos', 'cosh', 'deg2rad',
'degrees', 'divide', 'equal', 'exp', 'exp2', 'expm1',
'fabs', 'floor', 'floor_divide', 'fmax', 'fmin', 'fmod',
'frexp', 'greater', 'greater_equal', 'hypot', 'invert',
'isfinite', 'isinf', 'isnan', 'ldexp', 'left_shift', 'less',
'less_equal', 'log', 'log10', 'log1p', 'log2', 'logaddexp',
'logaddexp2', 'logical_and', 'logical_not', 'logical_or',
'logical_xor', 'maximum', 'minimum', 'mod', 'modf',
'multiply', 'negative', 'nextafter', 'not_equal', 'power',
'rad2deg', 'radians', 'reciprocal', 'remainder',
'right_shift', 'rint', 'sign', 'signbit', 'sin', 'sinh',
'spacing', 'sqrt', 'square', 'subtract', 'tan', 'tanh',
'true_divide', 'trunc']
for name in dir(np):
if isinstance(getattr(np, name, None), np.ufunc) and name not in ufuncs:
print("Missing ufunc %r" % (name,))
class Broadcast(Benchmark):
def setup(self):
self.d = np.ones((50000, 100), dtype=np.float64)
self.e = np.ones((100,), dtype=np.float64)
def time_broadcast(self):
self.d - self.e
class UFunc(Benchmark):
params = [ufuncs]
param_names = ['ufunc']
timeout = 2
def setup(self, ufuncname):
np.seterr(all='ignore')
try:
self.f = getattr(np, ufuncname)
except AttributeError:
raise NotImplementedError()
self.args = []
for t, a in squares_.items():
arg = (a,) * self.f.nin
try:
self.f(*arg)
except TypeError:
continue
self.args.append(arg)
def time_ufunc_types(self, ufuncname):
[self.f(*arg) for arg in self.args]
class Custom(Benchmark):
def setup(self):
self.b = np.ones(20000, dtype=np.bool)
def time_nonzero(self):
np.nonzero(self.b)
def time_count_nonzero(self):
np.count_nonzero(self.b)
def time_not_bool(self):
(~self.b)
def time_and_bool(self):
(self.b & self.b)
def time_or_bool(self):
(self.b | self.b)
class CustomScalar(Benchmark):
params = [np.float32, np.float64]
param_names = ['dtype']
def setup(self, dtype):
self.d = np.ones(20000, dtype=dtype)
def time_add_scalar2(self, dtype):
np.add(self.d, 1)
def time_divide_scalar2(self, dtype):
np.divide(self.d, 1)
def time_divide_scalar2_inplace(self, dtype):
np.divide(self.d, 1, out=self.d)
def time_less_than_scalar2(self, dtype):
(self.d < 1)
class Scalar(Benchmark):
def setup(self):
self.x = np.asarray(1.0)
self.y = np.asarray((1.0 + 1j))
self.z = complex(1.0, 1.0)
def time_add_scalar(self):
(self.x + self.x)
def time_add_scalar_conv(self):
(self.x + 1.0)
def time_add_scalar_conv_complex(self):
(self.y + self.z)
| {
"repo_name": "rhythmsosad/numpy",
"path": "benchmarks/benchmarks/bench_ufunc.py",
"copies": "29",
"size": "3320",
"license": "bsd-3-clause",
"hash": -348672514474527360,
"line_mean": 27.6206896552,
"line_max": 76,
"alpha_frac": 0.5524096386,
"autogenerated": false,
"ratio": 3.2233009708737863,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 116
} |
from __future__ import absolute_import, division, print_function
from .common import Benchmark, TYPES1, get_squares
import numpy as np
class AddReduce(Benchmark):
def setup(self):
self.squares = get_squares().values()
def time_axis_0(self):
[np.add.reduce(a, axis=0) for a in self.squares]
def time_axis_1(self):
[np.add.reduce(a, axis=1) for a in self.squares]
class AddReduceSeparate(Benchmark):
params = [[0, 1], TYPES1]
param_names = ['axis', 'type']
def setup(self, axis, typename):
self.a = get_squares()[typename]
def time_reduce(self, axis, typename):
np.add.reduce(self.a, axis=axis)
class AnyAll(Benchmark):
def setup(self):
# avoid np.zeros's lazy allocation that would
# cause page faults during benchmark
self.zeros = np.full(100000, 0, bool)
        self.ones = np.full(100000, 1, bool)
def time_all_fast(self):
self.zeros.all()
def time_all_slow(self):
self.ones.all()
def time_any_fast(self):
self.ones.any()
def time_any_slow(self):
self.zeros.any()
class MinMax(Benchmark):
params = [np.float32, np.float64, np.intp]
param_names = ['dtype']
def setup(self, dtype):
self.d = np.ones(20000, dtype=dtype)
def time_min(self, dtype):
np.min(self.d)
def time_max(self, dtype):
np.max(self.d)
class SmallReduction(Benchmark):
def setup(self):
self.d = np.ones(100, dtype=np.float32)
def time_small(self):
np.sum(self.d)
| {
"repo_name": "gfyoung/numpy",
"path": "benchmarks/benchmarks/bench_reduce.py",
"copies": "3",
"size": "1567",
"license": "bsd-3-clause",
"hash": 369208901052498100,
"line_mean": 21.7101449275,
"line_max": 64,
"alpha_frac": 0.6119974474,
"autogenerated": false,
"ratio": 3.2851153039832286,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 69
} |
from __future__ import absolute_import, division, print_function
from .common import Benchmark, TYPES1
import numpy as np
class Take(Benchmark):
params = [
[(1000, 1), (1000, 2), (2, 1000, 1), (1000, 3)],
["raise", "wrap", "clip"],
TYPES1]
param_names = ["shape", "mode", "dtype"]
def setup(self, shape, mode, dtype):
self.arr = np.ones(shape, dtype)
self.indices = np.arange(1000)
def time_contiguous(self, shape, mode, dtype):
self.arr.take(self.indices, axis=-2, mode=mode)
class PutMask(Benchmark):
params = [
[True, False],
TYPES1]
param_names = ["values_is_scalar", "dtype"]
def setup(self, values_is_scalar, dtype):
if values_is_scalar:
self.vals = np.array(1., dtype=dtype)
else:
self.vals = np.ones(1000, dtype=dtype)
self.arr = np.ones(1000, dtype=dtype)
self.dense_mask = np.ones(1000, dtype="bool")
self.sparse_mask = np.zeros(1000, dtype="bool")
def time_dense(self, values_is_scalar, dtype):
np.putmask(self.arr, self.dense_mask, self.vals)
def time_sparse(self, values_is_scalar, dtype):
np.putmask(self.arr, self.sparse_mask, self.vals)
| {
"repo_name": "anntzer/numpy",
"path": "benchmarks/benchmarks/bench_itemselection.py",
"copies": "17",
"size": "1247",
"license": "bsd-3-clause",
"hash": -592601035751068200,
"line_mean": 26.7111111111,
"line_max": 64,
"alpha_frac": 0.5950280674,
"autogenerated": false,
"ratio": 3.3253333333333335,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from .common import Benchmark, TYPES1, squares
import numpy as np
class AddReduce(Benchmark):
def time_axis_0(self):
[np.add.reduce(a, axis=0) for a in squares.values()]
def time_axis_1(self):
[np.add.reduce(a, axis=1) for a in squares.values()]
class AddReduceSeparate(Benchmark):
params = [[0, 1], TYPES1]
param_names = ['axis', 'type']
def setup(self, axis, typename):
self.a = squares[typename]
def time_reduce(self, axis, typename):
np.add.reduce(self.a, axis=axis)
class AnyAll(Benchmark):
def setup(self):
self.zeros = np.zeros(100000, np.bool)
self.ones = np.ones(100000, np.bool)
def time_all_fast(self):
self.zeros.all()
def time_all_slow(self):
self.ones.all()
def time_any_fast(self):
self.ones.any()
def time_any_slow(self):
self.zeros.any()
class MinMax(Benchmark):
params = [np.float32, np.float64, np.intp]
param_names = ['dtype']
def setup(self, dtype):
self.d = np.ones(20000, dtype=dtype)
def time_min(self, dtype):
np.min(self.d)
def time_max(self, dtype):
np.max(self.d)
class SmallReduction(Benchmark):
def setup(self):
self.d = np.ones(100, dtype=np.float32)
def time_small(self):
np.sum(self.d)
| {
"repo_name": "Yusa95/numpy",
"path": "benchmarks/benchmarks/bench_reduce.py",
"copies": "29",
"size": "1399",
"license": "bsd-3-clause",
"hash": 7202777401067326000,
"line_mean": 20.859375,
"line_max": 64,
"alpha_frac": 0.611150822,
"autogenerated": false,
"ratio": 3.2013729977116703,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from ..common.vispy_data_viewer import BaseVispyViewer
from .layer_artist import IsosurfaceLayerArtist
from .layer_style_widget import IsosurfaceLayerStyleWidget
from .viewer_state import Vispy3DIsosurfaceViewerState
from ..common import tools as _tools, selection_tools # noqa
class VispyIsosurfaceViewer(BaseVispyViewer):
LABEL = "3D Isosurface Rendering"
_state_cls = Vispy3DIsosurfaceViewerState
_layer_style_widget_cls = IsosurfaceLayerStyleWidget
tools = BaseVispyViewer.tools
_data_artist_cls = IsosurfaceLayerArtist
_subset_artist_cls = IsosurfaceLayerArtist
def add_data(self, data):
first_layer_artist = len(self._layer_artist_container) == 0
added = super(VispyIsosurfaceViewer, self).add_data(data)
if added:
if first_layer_artist:
self.state.set_limits(*self._layer_artist_container[0].bbox)
self._ready_draw = True
return added
| {
"repo_name": "astrofrog/glue-vispy-viewers",
"path": "glue_vispy_viewers/isosurface/isosurface_viewer.py",
"copies": "2",
"size": "1025",
"license": "bsd-2-clause",
"hash": 6176645927758370000,
"line_mean": 29.1470588235,
"line_max": 76,
"alpha_frac": 0.7190243902,
"autogenerated": false,
"ratio": 3.451178451178451,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 34
} |
from __future__ import absolute_import, division, print_function
from ..common.vispy_data_viewer import BaseVispyViewer
from .layer_artist import ScatterLayerArtist
from .layer_style_widget import ScatterLayerStyleWidget
from .viewer_state import Vispy3DScatterViewerState
from ..common import selection_tools # noqa
from . import scatter_toolbar # noqa
class VispyScatterViewer(BaseVispyViewer):
LABEL = "3D Scatter Plot"
_state_cls = Vispy3DScatterViewerState
_layer_style_widget_cls = ScatterLayerStyleWidget
tools = BaseVispyViewer.tools + ['vispy:lasso', 'vispy:rectangle',
'vispy:circle', 'scatter3d:point']
def add_data(self, data):
if data in self._layer_artist_container:
return True
first_layer_artist = len(self._layer_artist_container) == 0
layer_artist = ScatterLayerArtist(layer=data, vispy_viewer=self)
self._layer_artist_container.append(layer_artist)
if first_layer_artist:
self.state.set_limits(*layer_artist.default_limits)
for subset in data.subsets:
self.add_subset(subset)
return True
def add_subset(self, subset):
if subset in self._layer_artist_container:
return
layer_artist = ScatterLayerArtist(layer=subset, vispy_viewer=self)
self._layer_artist_container.append(layer_artist)
def _add_subset(self, message):
self.add_subset(message.subset)
| {
"repo_name": "PennyQ/glue-3d-viewer",
"path": "glue_vispy_viewers/scatter/scatter_viewer.py",
"copies": "1",
"size": "1491",
"license": "bsd-2-clause",
"hash": 2833851321135696000,
"line_mean": 28.2352941176,
"line_max": 74,
"alpha_frac": 0.6800804829,
"autogenerated": false,
"ratio": 3.765151515151515,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4945231998051515,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from .compatibility import reduce, Iterator
from .matching import (Traverser, Pattern, PatternSet, StaticPatternSet,
DynamicPatternSet)
from .util import copy_doc
class Engine(object):
"""Main entry point for Pinyon"""
def __init__(self, context):
self.context = context
@copy_doc(Pattern, True)
def pattern(self, pat, vars=()):
return Pattern(self.context, pat, vars)
@copy_doc(PatternSet, True)
def patternset(self, patterns, type='static'):
if type == 'dynamic':
return DynamicPatternSet(self.context, patterns)
else:
return StaticPatternSet(self.context, patterns)
class Context(object):
"""Abstracting the interface for a term"""
def __init__(self, head=None, args=None, subs=None, rebuild=None):
self.head = head
self.args = args
self.subs = subs
self.rebuild = rebuild
def index(self, term, inds):
"""Get a subterm from its path index"""
return reduce(self.get, inds, term)
def get(self, term, ind):
"""Get the `ind`th subterm of `term`."""
return self.args(term)[ind]
def traverse(self, term, variant="normal"):
"""Perform a preorder traversal of a term.
Parameters
----------
term : term
variant : str, optional
Specify a variation of preorder traversal. Options are:
- ``"normal"``: yields ``term``
- ``"path"``: yields ``(term, path_index)``
- ``"arity"``: yields ``(term, arity)``
- ``"copyable"``: a copyable, stack based implementation, good for
backtracking. Yields ``term``
"""
if variant == 'copyable':
return Traverser(self, term)
else:
return PreorderTraversal(self, term, variant)
# TODO: It would be really nice to get rid of one of the traversal
# implementations. The stack based implementation (`Traverser`, imported from
# matching/core.py) is only used by the dynamic pattern sets, but a way to
# store the iteration state is necessary for backtracking. Conversely, the
# recursive algorithm is only necessary for the static pattern sets, as the
# path index of the current term is much faster to find in a recursive fashion.
# I'm fairly certain there must be a way to eliminate one of these...
class PreorderTraversal(Iterator):
"""Preorder traversal of a generic term"""
def __init__(self, context, node, variant="normal"):
self._skip_flag = False
args = context.args
if variant == "path":
# Yield (node, path index)
def _traverse(self, node, cur_path=()):
yield node, cur_path
if self._skip_flag:
self._skip_flag = False
return
for i, t in enumerate(args(node)):
for st, path in _traverse(self, t, cur_path + (i,)):
yield st, path
elif variant == "arity":
            # Yield (node, arity)
def _traverse(self, node):
childs = args(node)
yield node, len(childs)
if self._skip_flag:
self._skip_flag = False
return
for t in childs:
for res in _traverse(self, t):
yield res
        elif variant == "normal":
# Yield node
def _traverse(self, node):
yield node
if self._skip_flag:
self._skip_flag = False
return
for t in args(node):
for st in _traverse(self, t):
yield st
self._pt = _traverse(self, node)
def skip(self):
self._skip_flag = True
def __next__(self):
return next(self._pt)
def __iter__(self):
return self
| {
"repo_name": "jcrist/pinyon",
"path": "pinyon/core.py",
"copies": "1",
"size": "4023",
"license": "bsd-3-clause",
"hash": 59730178123307200,
"line_mean": 32.8067226891,
"line_max": 79,
"alpha_frac": 0.5550584141,
"autogenerated": false,
"ratio": 4.387131952017448,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5442190366117448,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from ..compatibility import _strtypes, _inttypes
__all__ = 'parse_index', 'emit_index'
def parse_index(ind, inside=False):
""" Parse structured index into Pythonic form
>>> parse_index([1, {'start': 0, 'stop': 10}])
(1, slice(0, 10, None))
See also:
emit_index
"""
if isinstance(ind, (_inttypes, _strtypes)):
return ind
if isinstance(ind, list):
result = [parse_index(i, True) for i in ind]
if not inside:
result = tuple(result)
if not inside and len(result) == 1:
result = result[0]
return result
if isinstance(ind, dict):
return slice(ind.get('start'), ind.get('stop'), ind.get('step'))
raise ValueError('Do not know how to parse %s into an index' % str(ind))
def emit_index(ind):
""" Emit Python index into structured form
>>> emit_index((1, slice(0, 10, None))) #doctest: +SKIP
[1, {'start': 0, 'stop': 10}]
See also:
parse_index
"""
if isinstance(ind, (_inttypes, _strtypes)):
return ind
if isinstance(ind, (list, tuple)):
return list(map(emit_index, ind))
if isinstance(ind, slice):
result = {'start': ind.start, 'stop': ind.stop, 'step': ind.step}
if result['step'] is None:
del result['step']
return result
| {
"repo_name": "LiaoPan/blaze",
"path": "blaze/server/index.py",
"copies": "10",
"size": "1398",
"license": "bsd-3-clause",
"hash": 6644134238319161000,
"line_mean": 28.125,
"line_max": 76,
"alpha_frac": 0.5793991416,
"autogenerated": false,
"ratio": 3.621761658031088,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9201160799631088,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from contextlib import contextmanager
from collections import namedtuple
from llvm.core import Constant
import llvm.core as lc
import llvm_cbuilder.shortnames as C
from blaze.py2help import reduce
@contextmanager
def position(builder, block):
'''Temporarily move to the new block and return once the context closes.
'''
orig = builder.basic_block
builder.position_at_end(block)
yield
builder.position_at_end(orig)
def compare_unsigned(builder, cmpstr, lhs, rhs):
icmpmap = {
'==': lc.ICMP_EQ,
'!=': lc.ICMP_NE,
'<' : lc.ICMP_ULT,
'<=' : lc.ICMP_ULE,
'>' : lc.ICMP_UGT,
'>=' : lc.ICMP_UGE,
}
return builder.icmp(icmpmap[cmpstr], lhs, rhs)
def compare_signed(builder, cmpstr, lhs, rhs):
icmpmap = {
'==': lc.ICMP_EQ,
'!=': lc.ICMP_NE,
'<' : lc.ICMP_SLT,
'<=' : lc.ICMP_SLE,
'>' : lc.ICMP_SGT,
'>=' : lc.ICMP_SGE,
}
return builder.icmp(icmpmap[cmpstr], lhs, rhs)
def debug(builder, msg, *args):
mod = builder.basic_block.function.module
val = lc.Constant.stringz(msg)
gvar = mod.add_global_variable(val.type, 'debugstr.%x' % hash(msg))
gvar.initializer = val
gvar.global_constant = True
charp = lc.Type.pointer(lc.Type.int(8))
printfty = lc.Type.function(lc.Type.int(), [charp], var_arg=True)
printf = mod.get_or_insert_function(printfty, name='printf')
builder.call(printf,
[builder.bitcast(gvar, charp)] + list(args))
_loop_info = namedtuple('loop_info', ['entry', 'body', 'incr', 'end',
'indices'])
@contextmanager
def loop_nest(builder, begins, ends, order=None, intp=C.intp, steps=None, dbg=False):
    '''Insert an N-dimensional loop nest.
Equivalent to:
ax0 = order[-1]
ax1 = order[-2]
ax2 = order[-3]
for i in range(begins[ax0], ends[ax0], steps[ax0]):
for j in range(begins[ax1], ends[ax1], steps[ax1]):
for k in range(begins[ax2], ends[ax2], steps[ax2]):
...
order: order[-1] is the outermost axis. order[0] is the innermost axis.
    begins: list of llvm values for the start of the index for each axis.
    ends: list of llvm values for the end of the index for each axis.
    steps: defaults to 1 for all axes.
    intp: integer type for indexing.
    dbg: boolean to enable debug mode; defaults to false.
    Returns a namedtuple with entry = <entry block>, body = <body block>,
incr = <increment block>, end = <ending block>,
indices = <list of index values>
Note: When the context exits, the builder is at the end of the original
    basicblock. It is the user's responsibility to add a branch into
the entry of the loop.
'''
# default steps to one
if not steps:
steps = [Constant.int(intp, 1) for _ in range(len(begins))]
if not order:
order = range(len(begins))
# initialize blocks
func = builder.basic_block.function
orig = builder.basic_block
entry = func.append_basic_block('loop.entry')
body = func.append_basic_block('loop.body')
incr = func.append_basic_block('loop.incr')
end = func.append_basic_block('loop.end')
ndim = len(order)
if ndim == 0:
with position(builder, entry):
builder.branch(body)
with position(builder, incr):
builder.branch(end)
with position(builder, body):
yield _loop_info(entry=entry, body=body, incr=incr, end=end, indices=[])
return
cond = func.append_basic_block('loop.cond')
outer_axis = order[-1]
#### populate loop entry ####
with position(builder, entry):
# sentry valid ranges
valid = reduce(builder.and_, [compare_signed(builder, '<', s, e)
for s, e in zip(begins, ends)])
builder.cbranch(valid, cond, end)
#### populate loop cond ####
with position(builder, cond):
# initialize indices
indices = [builder.phi(intp) for _ in range(ndim)]
for dim, (ind, ibegin) in enumerate(zip(indices, begins)):
ind.name = 'index.%d' % dim
ind.add_incoming(ibegin, entry)
# check if indices has ended
pred = compare_signed(builder, '<', indices[outer_axis], ends[outer_axis])
builder.cbranch(pred, body, end)
#### populate loop body ####
with position(builder, body):
if dbg:
fmt = '[%s]\n' % ', '.join(['%lld'] * ndim)
debug(builder, fmt, *indices)
yield _loop_info(entry=entry, body=body, incr=incr,
end=end, indices=indices)
#### populate loop increment ####
lastaxes = []
nextbb = incr
remain = [(ax, indices[ax]) for ax in reversed(order)]
while remain:
ax, ind = remain.pop()
with position(builder, nextbb):
for lastax, lastval in lastaxes:
indices[lastax].add_incoming(lastval, builder.basic_block)
indnext = builder.add(ind, steps[ax])
pred = compare_signed(builder, '<', indnext, ends[ax])
ind.add_incoming(indnext, builder.basic_block)
nextbb = func.append_basic_block('incr_%d' % ax)
builder.cbranch(pred, cond, nextbb)
lastaxes.append((ax, begins[ax]))
for ax, ind in remain:
ind.add_incoming(ind, builder.basic_block)
else:
with position(builder, nextbb):
builder.branch(end)
#### position back to the original block ####
assert builder.basic_block is orig
| {
"repo_name": "XinSong/blaze",
"path": "blaze/compute/bkernel/kernelgen.py",
"copies": "2",
"size": "5735",
"license": "bsd-3-clause",
"hash": 5231556046368226000,
"line_mean": 31.9597701149,
"line_max": 85,
"alpha_frac": 0.5883173496,
"autogenerated": false,
"ratio": 3.620580808080808,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00945051634232997,
"num_lines": 174
} |
from __future__ import absolute_import, division, print_function
from contextlib import contextmanager
from ctypes import (
CFUNCTYPE,
POINTER,
c_int,
c_longlong,
c_void_p,
cast,
create_string_buffer,
)
import libarchive
import libarchive.ffi as ffi
from fsspec import open_files
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.implementations.memory import MemoryFile
from fsspec.utils import DEFAULT_BLOCK_SIZE
# Libarchive requires seekable files or memory only for certain archive
# types. However, since we read the directory first to cache the contents
# and also allow random access to any file, the file-like object needs
# to be seekable no matter what.
# Seek call-backs (not provided in the libarchive python wrapper)
SEEK_CALLBACK = CFUNCTYPE(c_longlong, c_int, c_void_p, c_longlong, c_int)
read_set_seek_callback = ffi.ffi(
"read_set_seek_callback", [ffi.c_archive_p, SEEK_CALLBACK], c_int, ffi.check_int
)
new_api = hasattr(ffi, "NO_OPEN_CB")
@contextmanager
def custom_reader(file, format_name="all", filter_name="all", block_size=ffi.page_size):
"""Read an archive from a seekable file-like object.
    The `file` object must support the standard `readinto` and `seek` methods.
"""
buf = create_string_buffer(block_size)
buf_p = cast(buf, c_void_p)
def read_func(archive_p, context, ptrptr):
# readinto the buffer, returns number of bytes read
length = file.readinto(buf)
# write the address of the buffer into the pointer
ptrptr = cast(ptrptr, POINTER(c_void_p))
ptrptr[0] = buf_p
# tell libarchive how much data was written into the buffer
return length
def seek_func(archive_p, context, offset, whence):
file.seek(offset, whence)
        # tell libarchive the current position
return file.tell()
read_cb = ffi.READ_CALLBACK(read_func)
seek_cb = SEEK_CALLBACK(seek_func)
if new_api:
open_cb = ffi.NO_OPEN_CB
close_cb = ffi.NO_CLOSE_CB
else:
open_cb = libarchive.read.OPEN_CALLBACK(ffi.VOID_CB)
close_cb = libarchive.read.CLOSE_CALLBACK(ffi.VOID_CB)
with libarchive.read.new_archive_read(format_name, filter_name) as archive_p:
read_set_seek_callback(archive_p, seek_cb)
ffi.read_open(archive_p, None, open_cb, read_cb, close_cb)
yield libarchive.read.ArchiveRead(archive_p)
class LibArchiveFileSystem(AbstractArchiveFileSystem):
"""Compressed archives as a file-system (read-only)
Supports the following formats:
    tar, pax, cpio, ISO9660, zip, mtree, shar, ar, raw, xar, lha/lzh, rar,
    Microsoft CAB, 7-Zip, WARC
See the libarchive documentation for further restrictions.
https://www.libarchive.org/
    Keeps the file object open while the instance lives. It only works with
    seekable file-like objects. If the filesystem does not support this kind
    of file object, it is recommended to cache the file locally first.
This class is pickleable, but not necessarily thread-safe (depends on the
platform). See libarchive documentation for details.
"""
root_marker = ""
protocol = "libarchive"
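    # Minimal usage sketch (the archive path below is hypothetical); assumes
    # the "libarchive" protocol is registered with fsspec:
    #
    #   import fsspec
    #   fs = fsspec.filesystem("libarchive", fo="data/archive.7z")
    #   fs.ls("")                        # entries cached by _get_dirs()
    #   with fs.open("inner/file.txt") as f:
    #       data = f.read()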
def __init__(
self,
fo="",
mode="r",
target_protocol=None,
target_options=None,
block_size=DEFAULT_BLOCK_SIZE,
**kwargs,
):
"""
Parameters
----------
fo: str or file-like
            Contains the archive, and must exist. If a str, will fetch file using
`open_files()`, which must return one file exactly.
mode: str
Currently, only 'r' accepted
target_protocol: str (optional)
If ``fo`` is a string, this value can be used to override the
FS protocol inferred from a URL
target_options: dict (optional)
Kwargs passed when instantiating the target FS, if ``fo`` is
a string.
"""
super().__init__(self, **kwargs)
if mode != "r":
raise ValueError("Only read from archive files accepted")
if isinstance(fo, str):
files = open_files(fo, protocol=target_protocol, **(target_options or {}))
if len(files) != 1:
raise ValueError(
                    'Path "{}" did not resolve to exactly '
'one file: "{}"'.format(fo, files)
)
fo = files[0]
self.fo = fo.__enter__() # the whole instance is a context
self.block_size = block_size
self.dir_cache = None
@contextmanager
def _open_archive(self):
self.fo.seek(0)
with custom_reader(self.fo, block_size=self.block_size) as arc:
yield arc
@classmethod
def _strip_protocol(cls, path):
# file paths are always relative to the archive root
return super()._strip_protocol(path).lstrip("/")
def _get_dirs(self):
fields = {
"name": "pathname",
"size": "size",
"created": "ctime",
"mode": "mode",
"uid": "uid",
"gid": "gid",
"mtime": "mtime",
}
if self.dir_cache is not None:
return
self.dir_cache = {}
list_names = []
with self._open_archive() as arc:
for entry in arc:
if not entry.isdir and not entry.isfile:
# Skip symbolic links, fifo entries, etc.
continue
self.dir_cache.update(
{
dirname
+ "/": {"name": dirname + "/", "size": 0, "type": "directory"}
                        for dirname in self._all_dirnames({entry.name})
}
)
f = {key: getattr(entry, fields[key]) for key in fields}
f["type"] = "directory" if entry.isdir else "file"
list_names.append(entry.name)
self.dir_cache[f["name"]] = f
# libarchive does not seem to return an entry for the directories (at least
# not in all formats), so get the directories names from the files names
self.dir_cache.update(
{
dirname + "/": {"name": dirname + "/", "size": 0, "type": "directory"}
for dirname in self._all_dirnames(list_names)
}
)
def _open(
self,
path,
mode="rb",
block_size=None,
autocommit=True,
cache_options=None,
**kwargs,
):
path = self._strip_protocol(path)
if mode != "rb":
raise NotImplementedError
data = bytes()
with self._open_archive() as arc:
for entry in arc:
if entry.pathname != path:
continue
for block in entry.get_blocks(entry.size):
                    data += block
break
else:
raise ValueError
return MemoryFile(fs=self, path=path, data=data)
| {
"repo_name": "intake/filesystem_spec",
"path": "fsspec/implementations/libarchive.py",
"copies": "1",
"size": "7073",
"license": "bsd-3-clause",
"hash": 5293636143006004000,
"line_mean": 32.680952381,
"line_max": 88,
"alpha_frac": 0.5785381026,
"autogenerated": false,
"ratio": 4.039406053683609,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5117944156283609,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from contextlib import contextmanager
from warnings import warn
import networkx as nx
from datashape import discover
from .utils import expand_tuples, ignoring
ooc_types = set() # Out-of-Core types
class FailedConversionWarning(UserWarning):
def __init__(self, src, dest, exc):
self.src = src
self.dest = dest
self.exc = exc
def __str__(self):
return 'Failed on %s -> %s. Working around\nError message:\n%s' % (
self.src.__name__, self.dest.__name__, self.exc,
)
class NetworkDispatcher(object):
def __init__(self, name):
self.name = name
self.graph = nx.DiGraph()
def register(self, a, b, cost=1.0):
sigs = expand_tuples([a, b])
def _(func):
for a, b in sigs:
self.graph.add_edge(b, a, cost=cost, func=func)
return func
return _
def path(self, *args, **kwargs):
return path(self.graph, *args, **kwargs)
def __call__(self, *args, **kwargs):
return _transform(self.graph, *args, **kwargs)
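# Minimal usage sketch (names below are hypothetical, not part of this module):
# register(target, source) adds a conversion edge and calling the dispatcher
# walks the cheapest path from the source's type to the requested target.
#
#   convert = NetworkDispatcher('convert')
#
#   @convert.register(list, tuple, cost=0.1)
#   def tuple_to_list(t, **kwargs):
#       return list(t)
#
#   convert(list, (1, 2, 3))  # -> [1, 2, 3]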
def _transform(graph, target, source, excluded_edges=None, ooc_types=ooc_types,
**kwargs):
""" Transform source to target type using graph of transformations """
x = source
excluded_edges = excluded_edges or set()
with ignoring(NotImplementedError):
if 'dshape' not in kwargs:
kwargs['dshape'] = discover(x)
pth = path(graph, type(source), target,
excluded_edges=excluded_edges,
ooc_types=ooc_types)
try:
for (A, B, f) in pth:
x = f(x, excluded_edges=excluded_edges, **kwargs)
return x
except NotImplementedError as e:
if kwargs.get('raise_on_errors'):
raise
warn(FailedConversionWarning(A, B, e))
new_exclusions = excluded_edges | set([(A, B)])
return _transform(graph, target, source, excluded_edges=new_exclusions,
**kwargs)
def path(graph, source, target, excluded_edges=None, ooc_types=ooc_types):
""" Path of functions between two types """
if not isinstance(source, type):
source = type(source)
if not isinstance(target, type):
target = type(target)
if source not in graph:
for cls in valid_subclasses:
if issubclass(source, cls):
source = cls
break
# If both source and target are Out-Of-Core types then restrict ourselves
# to the graph of out-of-core types
if ooc_types:
oocs = tuple(ooc_types)
if issubclass(source, oocs) and issubclass(target, oocs):
graph = graph.subgraph([n for n in graph.nodes()
if issubclass(n, oocs)])
with without_edges(graph, excluded_edges) as g:
pth = nx.shortest_path(g, source=source, target=target, weight='cost')
result = [(src, tgt, graph.edge[src][tgt]['func'])
for src, tgt in zip(pth, pth[1:])]
return result
# Catch-all subclasses
from collections import Iterator
import numpy as np
valid_subclasses = [Iterator, np.ndarray]
@contextmanager
def without_edges(g, edges):
edges = edges or []
held = dict()
for a, b in edges:
held[(a, b)] = g.edge[a][b]
g.remove_edge(a, b)
try:
yield g
finally:
for (a, b), kwargs in held.items():
g.add_edge(a, b, **kwargs)
| {
"repo_name": "cpcloud/odo",
"path": "odo/core.py",
"copies": "2",
"size": "3511",
"license": "bsd-3-clause",
"hash": -8352149692060789000,
"line_mean": 29.2672413793,
"line_max": 79,
"alpha_frac": 0.5872970664,
"autogenerated": false,
"ratio": 3.7915766738660905,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0007149535280206787,
"num_lines": 116
} |
from __future__ import absolute_import, division, print_function
from contextlib import contextmanager
import numpy as np
from matplotlib.colors import ColorConverter
from ..extern.vispy import scene
from glue.external import six
class MultiColorScatter(scene.visuals.Markers):
"""
This is a helper class to make it easier to show multiple markers at
specific positions and control exactly which marker should be on top of
which.
"""
def __init__(self, *args, **kwargs):
self.layers = {}
self._combined_data = None
self._skip_update = False
super(MultiColorScatter, self).__init__(*args, **kwargs)
@contextmanager
def delay_update(self):
self._skip_update = True
yield
self._skip_update = False
def allocate(self, label):
if label in self.layers:
raise ValueError("Layer {0} already exists".format(label))
else:
self.layers[label] = {'data': None,
'mask': None,
'color': np.asarray((1., 1., 1.)),
'alpha': 1.,
'zorder': lambda: 0,
'size': 10,
'visible': True}
def deallocate(self, label):
self.layers.pop(label)
def set_data_values(self, label, x, y, z):
"""
Set the position of the datapoints
"""
# TODO: avoid re-allocating an array every time
self.layers[label]['data'] = np.array([x, y, z]).transpose()
self._update()
def set_visible(self, label, visible):
self.layers[label]['visible'] = visible
self._update()
def set_mask(self, label, mask):
self.layers[label]['mask'] = mask
self._update()
def set_size(self, label, size):
if not np.isscalar(size) and size.ndim > 1:
raise Exception("size should be a 1-d array")
self.layers[label]['size'] = size
self._update()
def set_color(self, label, rgb):
if isinstance(rgb, six.string_types):
rgb = ColorConverter().to_rgb(rgb)
self.layers[label]['color'] = np.asarray(rgb)
self._update()
def set_alpha(self, label, alpha):
self.layers[label]['alpha'] = alpha
self._update()
def set_zorder(self, label, zorder):
self.layers[label]['zorder'] = zorder
self._update()
def _update(self):
if self._skip_update:
return
data = []
colors = []
sizes = []
for label in sorted(self.layers, key=lambda x: self.layers[x]['zorder']()):
layer = self.layers[label]
if not layer['visible'] or layer['data'] is None:
continue
if layer['mask'] is None:
n_points = layer['data'].shape[0]
else:
n_points = np.sum(layer['mask'])
if n_points > 0:
# Data
if layer['mask'] is None:
data.append(layer['data'])
else:
data.append(layer['data'][layer['mask'], :])
# Colors
if layer['color'].ndim == 1:
rgba = np.hstack([layer['color'], 1])
rgba = np.repeat(rgba, n_points).reshape(4, -1).transpose()
else:
rgba = layer['color'].copy()
rgba[:, 3] *= layer['alpha']
colors.append(rgba)
# Sizes
if np.isscalar(layer['size']):
size = np.repeat(layer['size'], n_points)
else:
if layer['mask'] is None:
size = layer['size']
else:
size = layer['size'][layer['mask']]
sizes.append(size)
if len(data) == 0:
self.visible = False
return
else:
self.visible = True
data = np.vstack(data)
colors = np.vstack(colors)
sizes = np.hstack(sizes)
self.set_data(data, edge_color=colors, face_color=colors, size=sizes)
def draw(self, *args, **kwargs):
if len(self.layers) == 0:
return
else:
try:
super(MultiColorScatter, self).draw(*args, **kwargs)
except:
pass
if __name__ == "__main__": # pragma: nocover
from ..extern.vispy import app, scene
canvas = scene.SceneCanvas(keys='interactive')
view = canvas.central_widget.add_view()
view.camera = scene.TurntableCamera(up='z', fov=60)
x = np.random.random(20)
y = np.random.random(20)
z = np.random.random(20)
multi_scat = MultiColorScatter(parent=view.scene)
multi_scat.allocate('data')
multi_scat.set_zorder('data', lambda: 0)
multi_scat.set_data_values('data', x, y, z)
multi_scat.allocate('subset1')
multi_scat.set_mask('subset1', np.random.random(20) > 0.5)
multi_scat.set_color('subset1', 'red')
multi_scat.set_zorder('subset1', lambda: 1)
multi_scat.allocate('subset2')
multi_scat.set_mask('subset2', np.random.random(20) > 0.5)
multi_scat.set_color('subset2', 'green')
multi_scat.set_zorder('subset2', lambda: 2)
multi_scat.set_alpha('subset2', 0.5)
multi_scat.set_size('subset2', 20)
axis = scene.visuals.XYZAxis(parent=view.scene)
canvas.show()
app.run()
| {
"repo_name": "PennyQ/glue-3d-viewer",
"path": "glue_vispy_viewers/scatter/multi_scatter.py",
"copies": "1",
"size": "5553",
"license": "bsd-2-clause",
"hash": -6358218402749636000,
"line_mean": 27.921875,
"line_max": 83,
"alpha_frac": 0.5222402305,
"autogenerated": false,
"ratio": 3.91056338028169,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49328036107816897,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from ..convert import convert
from ..append import append
from ..create import create
from datashape import discover, dshape, DataShape, Record, Tuple
import datashape
import numpy as np
from dynd import nd
@convert.register(np.ndarray, nd.array, cost=1000.1)
def dynd_to_numpy(x, **kwargs):
return nd.as_numpy(x, allow_copy=True)
@convert.register(nd.array, np.ndarray, cost=1000.8)
def numpy_to_dynd(x, **kwargs):
return nd.array(x, type=str(discover(x)))
@convert.register(list, nd.array, cost=100.0)
def dynd_to_list(x, **kwargs):
return nd.as_py(x, tuple=True)
@convert.register(nd.array, list, cost=90.0)
def list_to_dynd(L, **kwargs):
ds = kwargs['dshape']
if isinstance(ds.measure, Tuple):
measure = Record([['f%d'%i, typ] for i, typ in
enumerate(ds.measure.parameters[0])])
ds = DataShape(*(ds.shape + (measure,)))
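        # e.g. a Tuple measure of (int32, float64) becomes the Record measure
        # {f0: int32, f1: float64}, which dynd can construct from Python lists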
return nd.array(L, dtype=str(ds))
@create.register(nd.array)
def create_dynd_array(x, dshape=None):
return nd.empty(str(dshape))
@discover.register(nd.array)
def discover_dynd_array(x, **kwargs):
return dshape(str(nd.type_of(x)))
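# Illustrative round-trip sketch (the helper name is made up and not part of odo):
# it exercises the registered NumPy <-> DyND conversions defined above.
def _demo_dynd_roundtrip():
    arr = np.arange(6).reshape(2, 3)
    dy = numpy_to_dynd(arr)          # nd.array carrying the discovered dshape
    back = dynd_to_numpy(dy)         # converted back to a NumPy array
    assert (back == arr).all()
    return back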
| {
"repo_name": "cpcloud/odo",
"path": "odo/backends/dynd.py",
"copies": "6",
"size": "1191",
"license": "bsd-3-clause",
"hash": 4181856336189260000,
"line_mean": 26.6976744186,
"line_max": 64,
"alpha_frac": 0.6884970613,
"autogenerated": false,
"ratio": 2.962686567164179,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6651183628464179,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from copy import deepcopy
import numpy as np
import pandas as pd
import pytest
from xarray import DataArray, Dataset, Variable, auto_combine, concat
from xarray.core.pycompat import OrderedDict, iteritems
from . import (
InaccessibleArray, TestCase, assert_array_equal, assert_equal,
assert_identical, raises_regex, requires_dask)
from .test_dataset import create_test_data
class TestConcatDataset(TestCase):
def test_concat(self):
# TODO: simplify and split this test case
# drop the third dimension to keep things relatively understandable
data = create_test_data()
for k in list(data.variables):
if 'dim3' in data[k].dims:
del data[k]
split_data = [data.isel(dim1=slice(3)),
data.isel(dim1=slice(3, None))]
assert_identical(data, concat(split_data, 'dim1'))
def rectify_dim_order(dataset):
# return a new dataset with all variable dimensions transposed into
# the order in which they are found in `data`
return Dataset(dict((k, v.transpose(*data[k].dims))
for k, v in iteritems(dataset.data_vars)),
dataset.coords, attrs=dataset.attrs)
for dim in ['dim1', 'dim2']:
datasets = [g for _, g in data.groupby(dim, squeeze=False)]
assert_identical(data, concat(datasets, dim))
dim = 'dim2'
assert_identical(
data, concat(datasets, data[dim]))
assert_identical(
data, concat(datasets, data[dim], coords='minimal'))
datasets = [g for _, g in data.groupby(dim, squeeze=True)]
concat_over = [k for k, v in iteritems(data.coords)
if dim in v.dims and k != dim]
actual = concat(datasets, data[dim], coords=concat_over)
assert_identical(data, rectify_dim_order(actual))
actual = concat(datasets, data[dim], coords='different')
assert_identical(data, rectify_dim_order(actual))
# make sure the coords argument behaves as expected
data.coords['extra'] = ('dim4', np.arange(3))
for dim in ['dim1', 'dim2']:
datasets = [g for _, g in data.groupby(dim, squeeze=True)]
actual = concat(datasets, data[dim], coords='all')
expected = np.array([data['extra'].values
for _ in range(data.dims[dim])])
assert_array_equal(actual['extra'].values, expected)
actual = concat(datasets, data[dim], coords='different')
assert_equal(data['extra'], actual['extra'])
actual = concat(datasets, data[dim], coords='minimal')
assert_equal(data['extra'], actual['extra'])
# verify that the dim argument takes precedence over
# concatenating dataset variables of the same name
dim = (2 * data['dim1']).rename('dim1')
datasets = [g for _, g in data.groupby('dim1', squeeze=False)]
expected = data.copy()
expected['dim1'] = dim
assert_identical(expected, concat(datasets, dim))
def test_concat_data_vars(self):
data = Dataset({'foo': ('x', np.random.randn(10))})
objs = [data.isel(x=slice(5)), data.isel(x=slice(5, None))]
for data_vars in ['minimal', 'different', 'all', [], ['foo']]:
actual = concat(objs, dim='x', data_vars=data_vars)
assert_identical(data, actual)
def test_concat_coords(self):
data = Dataset({'foo': ('x', np.random.randn(10))})
expected = data.assign_coords(c=('x', [0] * 5 + [1] * 5))
objs = [data.isel(x=slice(5)).assign_coords(c=0),
data.isel(x=slice(5, None)).assign_coords(c=1)]
for coords in ['different', 'all', ['c']]:
actual = concat(objs, dim='x', coords=coords)
assert_identical(expected, actual)
for coords in ['minimal', []]:
with raises_regex(ValueError, 'not equal across'):
concat(objs, dim='x', coords=coords)
def test_concat_constant_index(self):
# GH425
ds1 = Dataset({'foo': 1.5}, {'y': 1})
ds2 = Dataset({'foo': 2.5}, {'y': 1})
expected = Dataset({'foo': ('y', [1.5, 2.5]), 'y': [1, 1]})
for mode in ['different', 'all', ['foo']]:
actual = concat([ds1, ds2], 'y', data_vars=mode)
assert_identical(expected, actual)
with raises_regex(ValueError, 'not equal across datasets'):
concat([ds1, ds2], 'y', data_vars='minimal')
def test_concat_size0(self):
data = create_test_data()
split_data = [data.isel(dim1=slice(0, 0)), data]
actual = concat(split_data, 'dim1')
assert_identical(data, actual)
actual = concat(split_data[::-1], 'dim1')
assert_identical(data, actual)
def test_concat_autoalign(self):
ds1 = Dataset({'foo': DataArray([1, 2], coords=[('x', [1, 2])])})
ds2 = Dataset({'foo': DataArray([1, 2], coords=[('x', [1, 3])])})
actual = concat([ds1, ds2], 'y')
expected = Dataset({'foo': DataArray([[1, 2, np.nan], [1, np.nan, 2]],
dims=['y', 'x'],
coords={'x': [1, 2, 3]})})
assert_identical(expected, actual)
def test_concat_errors(self):
data = create_test_data()
split_data = [data.isel(dim1=slice(3)),
data.isel(dim1=slice(3, None))]
with raises_regex(ValueError, 'must supply at least one'):
concat([], 'dim1')
with raises_regex(ValueError, 'are not coordinates'):
concat([data, data], 'new_dim', coords=['not_found'])
with raises_regex(ValueError, 'global attributes not'):
data0, data1 = deepcopy(split_data)
data1.attrs['foo'] = 'bar'
concat([data0, data1], 'dim1', compat='identical')
assert_identical(
data, concat([data0, data1], 'dim1', compat='equals'))
with raises_regex(ValueError, 'encountered unexpected'):
data0, data1 = deepcopy(split_data)
data1['foo'] = ('bar', np.random.randn(10))
concat([data0, data1], 'dim1')
with raises_regex(ValueError, 'compat.* invalid'):
concat(split_data, 'dim1', compat='foobar')
with raises_regex(ValueError, 'unexpected value for'):
concat([data, data], 'new_dim', coords='foobar')
with raises_regex(
ValueError, 'coordinate in some datasets but not others'):
concat([Dataset({'x': 0}), Dataset({'x': [1]})], dim='z')
with raises_regex(
ValueError, 'coordinate in some datasets but not others'):
concat([Dataset({'x': 0}), Dataset({}, {'x': 1})], dim='z')
with raises_regex(ValueError, 'no longer a valid'):
concat([data, data], 'new_dim', mode='different')
with raises_regex(ValueError, 'no longer a valid'):
concat([data, data], 'new_dim', concat_over='different')
def test_concat_promote_shape(self):
# mixed dims within variables
objs = [Dataset({}, {'x': 0}), Dataset({'x': [1]})]
actual = concat(objs, 'x')
expected = Dataset({'x': [0, 1]})
assert_identical(actual, expected)
objs = [Dataset({'x': [0]}), Dataset({}, {'x': 1})]
actual = concat(objs, 'x')
assert_identical(actual, expected)
# mixed dims between variables
objs = [Dataset({'x': [2], 'y': 3}), Dataset({'x': [4], 'y': 5})]
actual = concat(objs, 'x')
expected = Dataset({'x': [2, 4], 'y': ('x', [3, 5])})
assert_identical(actual, expected)
# mixed dims in coord variable
objs = [Dataset({'x': [0]}, {'y': -1}),
Dataset({'x': [1]}, {'y': ('x', [-2])})]
actual = concat(objs, 'x')
expected = Dataset({'x': [0, 1]}, {'y': ('x', [-1, -2])})
assert_identical(actual, expected)
# scalars with mixed lengths along concat dim -- values should repeat
objs = [Dataset({'x': [0]}, {'y': -1}),
Dataset({'x': [1, 2]}, {'y': -2})]
actual = concat(objs, 'x')
expected = Dataset({'x': [0, 1, 2]}, {'y': ('x', [-1, -2, -2])})
assert_identical(actual, expected)
# broadcast 1d x 1d -> 2d
objs = [Dataset({'z': ('x', [-1])}, {'x': [0], 'y': [0]}),
Dataset({'z': ('y', [1])}, {'x': [1], 'y': [0]})]
actual = concat(objs, 'x')
expected = Dataset({'z': (('x', 'y'), [[-1], [1]])},
{'x': [0, 1], 'y': [0]})
assert_identical(actual, expected)
def test_concat_do_not_promote(self):
# GH438
objs = [Dataset({'y': ('t', [1])}, {'x': 1, 't': [0]}),
Dataset({'y': ('t', [2])}, {'x': 1, 't': [0]})]
expected = Dataset({'y': ('t', [1, 2])}, {'x': 1, 't': [0, 0]})
actual = concat(objs, 't')
assert_identical(expected, actual)
objs = [Dataset({'y': ('t', [1])}, {'x': 1, 't': [0]}),
Dataset({'y': ('t', [2])}, {'x': 2, 't': [0]})]
with pytest.raises(ValueError):
concat(objs, 't', coords='minimal')
def test_concat_dim_is_variable(self):
objs = [Dataset({'x': 0}), Dataset({'x': 1})]
coord = Variable('y', [3, 4])
expected = Dataset({'x': ('y', [0, 1]), 'y': [3, 4]})
actual = concat(objs, coord)
assert_identical(actual, expected)
def test_concat_multiindex(self):
x = pd.MultiIndex.from_product([[1, 2, 3], ['a', 'b']])
expected = Dataset({'x': x})
actual = concat([expected.isel(x=slice(2)),
expected.isel(x=slice(2, None))], 'x')
assert expected.equals(actual)
assert isinstance(actual.x.to_index(), pd.MultiIndex)
class TestConcatDataArray(TestCase):
def test_concat(self):
ds = Dataset({'foo': (['x', 'y'], np.random.random((2, 3))),
'bar': (['x', 'y'], np.random.random((2, 3)))},
{'x': [0, 1]})
foo = ds['foo']
bar = ds['bar']
# from dataset array:
expected = DataArray(np.array([foo.values, bar.values]),
dims=['w', 'x', 'y'], coords={'x': [0, 1]})
actual = concat([foo, bar], 'w')
assert_equal(expected, actual)
# from iteration:
grouped = [g for _, g in foo.groupby('x')]
stacked = concat(grouped, ds['x'])
assert_identical(foo, stacked)
# with an index as the 'dim' argument
stacked = concat(grouped, ds.indexes['x'])
assert_identical(foo, stacked)
actual = concat([foo[0], foo[1]], pd.Index([0, 1])
).reset_coords(drop=True)
expected = foo[:2].rename({'x': 'concat_dim'})
assert_identical(expected, actual)
actual = concat([foo[0], foo[1]], [0, 1]).reset_coords(drop=True)
expected = foo[:2].rename({'x': 'concat_dim'})
assert_identical(expected, actual)
with raises_regex(ValueError, 'not identical'):
concat([foo, bar], dim='w', compat='identical')
with raises_regex(ValueError, 'not a valid argument'):
concat([foo, bar], dim='w', data_vars='minimal')
def test_concat_encoding(self):
# Regression test for GH1297
ds = Dataset({'foo': (['x', 'y'], np.random.random((2, 3))),
'bar': (['x', 'y'], np.random.random((2, 3)))},
{'x': [0, 1]})
foo = ds['foo']
foo.encoding = {"complevel": 5}
ds.encoding = {"unlimited_dims": 'x'}
assert concat([foo, foo], dim="x").encoding == foo.encoding
assert concat([ds, ds], dim="x").encoding == ds.encoding
@requires_dask
def test_concat_lazy(self):
import dask.array as da
arrays = [DataArray(
da.from_array(InaccessibleArray(np.zeros((3, 3))), 3),
dims=['x', 'y']) for _ in range(2)]
# should not raise
combined = concat(arrays, dim='z')
assert combined.shape == (2, 3, 3)
assert combined.dims == ('z', 'x', 'y')
class TestAutoCombine(TestCase):
@requires_dask # only for toolz
def test_auto_combine(self):
objs = [Dataset({'x': [0]}), Dataset({'x': [1]})]
actual = auto_combine(objs)
expected = Dataset({'x': [0, 1]})
assert_identical(expected, actual)
actual = auto_combine([actual])
assert_identical(expected, actual)
objs = [Dataset({'x': [0, 1]}), Dataset({'x': [2]})]
actual = auto_combine(objs)
expected = Dataset({'x': [0, 1, 2]})
assert_identical(expected, actual)
# ensure auto_combine handles non-sorted variables
objs = [Dataset(OrderedDict([('x', ('a', [0])), ('y', ('a', [0]))])),
Dataset(OrderedDict([('y', ('a', [1])), ('x', ('a', [1]))]))]
actual = auto_combine(objs)
expected = Dataset({'x': ('a', [0, 1]), 'y': ('a', [0, 1])})
assert_identical(expected, actual)
objs = [Dataset({'x': [0], 'y': [0]}), Dataset({'y': [1], 'x': [1]})]
with raises_regex(ValueError, 'too many .* dimensions'):
auto_combine(objs)
objs = [Dataset({'x': 0}), Dataset({'x': 1})]
with raises_regex(ValueError, 'cannot infer dimension'):
auto_combine(objs)
objs = [Dataset({'x': [0], 'y': [0]}), Dataset({'x': [0]})]
with pytest.raises(KeyError):
auto_combine(objs)
@requires_dask # only for toolz
def test_auto_combine_previously_failed(self):
# In the above scenario, one file is missing, containing the data for
# one year's data for one variable.
datasets = [Dataset({'a': ('x', [0]), 'x': [0]}),
Dataset({'b': ('x', [0]), 'x': [0]}),
Dataset({'a': ('x', [1]), 'x': [1]})]
expected = Dataset({'a': ('x', [0, 1]), 'b': ('x', [0, np.nan])},
{'x': [0, 1]})
actual = auto_combine(datasets)
assert_identical(expected, actual)
# Your data includes "time" and "station" dimensions, and each year's
# data has a different set of stations.
datasets = [Dataset({'a': ('x', [2, 3]), 'x': [1, 2]}),
Dataset({'a': ('x', [1, 2]), 'x': [0, 1]})]
expected = Dataset({'a': (('t', 'x'),
[[np.nan, 2, 3], [1, 2, np.nan]])},
{'x': [0, 1, 2]})
actual = auto_combine(datasets, concat_dim='t')
assert_identical(expected, actual)
@requires_dask # only for toolz
def test_auto_combine_still_fails(self):
# concat can't handle new variables (yet):
# https://github.com/pydata/xarray/issues/508
datasets = [Dataset({'x': 0}, {'y': 0}),
Dataset({'x': 1}, {'y': 1, 'z': 1})]
with pytest.raises(ValueError):
auto_combine(datasets, 'y')
@requires_dask # only for toolz
def test_auto_combine_no_concat(self):
objs = [Dataset({'x': 0}), Dataset({'y': 1})]
actual = auto_combine(objs)
expected = Dataset({'x': 0, 'y': 1})
assert_identical(expected, actual)
objs = [Dataset({'x': 0, 'y': 1}), Dataset({'y': np.nan, 'z': 2})]
actual = auto_combine(objs)
expected = Dataset({'x': 0, 'y': 1, 'z': 2})
assert_identical(expected, actual)
data = Dataset({'x': 0})
actual = auto_combine([data, data, data], concat_dim=None)
assert_identical(data, actual)
# Single object, with a concat_dim explicitly provided
# Test the issue reported in GH #1988
objs = [Dataset({'x': 0, 'y': 1})]
dim = DataArray([100], name='baz', dims='baz')
actual = auto_combine(objs, concat_dim=dim)
expected = Dataset({'x': ('baz', [0]), 'y': ('baz', [1])},
{'baz': [100]})
assert_identical(expected, actual)
# Just making sure that auto_combine is doing what is
# expected for non-scalar values, too.
objs = [Dataset({'x': ('z', [0, 1]), 'y': ('z', [1, 2])})]
dim = DataArray([100], name='baz', dims='baz')
actual = auto_combine(objs, concat_dim=dim)
expected = Dataset({'x': (('baz', 'z'), [[0, 1]]),
'y': (('baz', 'z'), [[1, 2]])},
{'baz': [100]})
assert_identical(expected, actual)
| {
"repo_name": "jcmgray/xarray",
"path": "xarray/tests/test_combine.py",
"copies": "1",
"size": "16696",
"license": "apache-2.0",
"hash": 2786254008473380000,
"line_mean": 40.9497487437,
"line_max": 79,
"alpha_frac": 0.5176090081,
"autogenerated": false,
"ratio": 3.6374727668845317,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4655081774984532,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from .core import common_subexpression
from .expressions import Expr, Symbol
from .reductions import Reduction, Summary, summary
from ..dispatch import dispatch
from datashape import dshape, Record, Option, Unit, var
__all__ = ['by', 'By', 'count_values']
def _names_and_types(expr):
schema = expr.dshape.measure
if isinstance(schema, Option):
schema = schema.ty
if isinstance(schema, Record):
return schema.names, schema.types
if isinstance(schema, Unit):
return [expr._name], [expr.dshape.measure]
raise ValueError("Unable to determine name and type of %s" % expr)
class By(Expr):
""" Split-Apply-Combine Operator
Examples
--------
>>> t = Symbol('t', 'var * {name: string, amount: int, id: int}')
>>> e = by(t['name'], t['amount'].sum())
>>> data = [['Alice', 100, 1],
... ['Bob', 200, 2],
... ['Alice', 50, 3]]
>>> from blaze.compute.python import compute
>>> sorted(compute(e, data))
[('Alice', 150), ('Bob', 200)]
"""
__slots__ = 'grouper', 'apply'
@property
def _child(self):
return common_subexpression(self.grouper, self.apply)
@property
def schema(self):
grouper_names, grouper_types = _names_and_types(self.grouper)
apply_names, apply_types = _names_and_types(self.apply)
names = grouper_names + apply_names
types = grouper_types + apply_types
return dshape(Record(list(zip(names, types))))
@property
def dshape(self):
# TODO: think if this should be generalized
return var * self.schema
@dispatch(Expr, (Summary, Reduction))
def by(grouper, apply):
return By(grouper, apply)
@dispatch(Expr)
def by(grouper, **kwargs):
return By(grouper, summary(**kwargs))
def count_values(expr, sort=True):
"""
Count occurrences of elements in this column
Sort by counts by default
Add ``sort=False`` keyword to avoid this behavior.
"""
result = by(expr, count=expr.count())
if sort:
result = result.sort('count', ascending=False)
return result
from datashape.predicates import iscollection
from .expressions import dshape_method_list
dshape_method_list.extend([
(iscollection, set([count_values])),
])
| {
"repo_name": "vitan/blaze",
"path": "blaze/expr/split_apply_combine.py",
"copies": "1",
"size": "2344",
"license": "bsd-3-clause",
"hash": 4021139576315974000,
"line_mean": 25.3370786517,
"line_max": 70,
"alpha_frac": 0.6318259386,
"autogenerated": false,
"ratio": 3.668231611893584,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4800057550493584,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from .core import common_subexpression
from .expressions import Expr, symbol
from .reductions import Reduction, Summary, summary
from ..dispatch import dispatch
from datashape import dshape, Record, Option, Unit, var
__all__ = ['by', 'By', 'count_values']
def _names_and_types(expr):
schema = expr.dshape.measure
if isinstance(schema, Option):
schema = schema.ty
if isinstance(schema, Record):
return schema.names, schema.types
if isinstance(schema, Unit):
return [expr._name], [expr.dshape.measure]
raise ValueError("Unable to determine name and type of %s" % expr)
class By(Expr):
""" Split-Apply-Combine Operator
Examples
--------
>>> t = symbol('t', 'var * {name: string, amount: int, id: int}')
>>> e = by(t['name'], total=t['amount'].sum())
>>> data = [['Alice', 100, 1],
... ['Bob', 200, 2],
... ['Alice', 50, 3]]
>>> from blaze.compute.python import compute
>>> sorted(compute(e, data))
[('Alice', 150), ('Bob', 200)]
"""
__slots__ = '_hash', 'grouper', 'apply'
@property
def _child(self):
return common_subexpression(self.grouper, self.apply)
@property
def schema(self):
grouper_names, grouper_types = _names_and_types(self.grouper)
apply_names, apply_types = _names_and_types(self.apply)
names = grouper_names + apply_names
types = grouper_types + apply_types
return dshape(Record(list(zip(names, types))))
@property
def dshape(self):
# TODO: think if this should be generalized
return var * self.schema
def __str__(self):
s = 'by('
s += str(self.grouper) + ', '
if isinstance(self.apply, Summary):
s += str(self.apply)[len('summary('):-len(')')]
else:
s += str(self.apply)
s += ')'
return s
@dispatch(Expr, Reduction)
def by(grouper, s):
raise ValueError("This syntax has been removed.\n"
"Please name reductions with keyword arguments.\n"
"Before: by(t.name, t.amount.sum())\n"
"After: by(t.name, total=t.amount.sum())")
@dispatch(Expr, Summary)
def by(grouper, s):
return By(grouper, s)
@dispatch(Expr)
def by(grouper, **kwargs):
return By(grouper, summary(**kwargs))
def count_values(expr, sort=True):
"""
Count occurrences of elements in this column
Sort by counts by default
Add ``sort=False`` keyword to avoid this behavior.
"""
result = by(expr, count=expr.count())
if sort:
result = result.sort('count', ascending=False)
return result
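# Illustrative sketch (the symbol 't' and its dshape are made up): count_values
# builds a By expression grouped on the given column with a 'count' reduction,
# sorted by count in descending order.
def _demo_count_values():
    t = symbol('t', 'var * {name: string, amount: int}')
    expr = count_values(t.name)
    return expr.fields               # expected: ['name', 'count']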
from datashape.predicates import iscollection
from .expressions import dshape_method_list
dshape_method_list.extend([
(lambda ds: len(ds.shape) == 1, set([count_values])),
])
| {
"repo_name": "dwillmer/blaze",
"path": "blaze/expr/split_apply_combine.py",
"copies": "2",
"size": "2872",
"license": "bsd-3-clause",
"hash": 7743877256153608000,
"line_mean": 26.3523809524,
"line_max": 70,
"alpha_frac": 0.6103760446,
"autogenerated": false,
"ratio": 3.567701863354037,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0026678263458801764,
"num_lines": 105
} |
from __future__ import absolute_import, division, print_function
from .core import VAR, PatternSet
from ..util import copy_doc
class StaticPatternSet(PatternSet):
"""A set of patterns.
    Forms a structure for fast matching over a set of patterns, allowing a term
    to be matched against many patterns at the same time.
Attributes
----------
patterns : list
A list of `Pattern`s included in the `PatternSet`.
"""
def __init__(self, context, patterns):
self.context = context
self.patterns = patterns
if not all(self.context == p.context for p in patterns):
raise ValueError("All patterns in a PatternSet must have the same"
"context")
self._net = build_automata(self.context, self.patterns)
@copy_doc(PatternSet.match_iter)
def match_iter(self, t):
inds, data = self._match(t)
for i in inds:
pat = self.patterns[i]
subs = _process_match(pat, data)
if subs is not None:
yield pat, subs
def _match(self, t):
"""Performs the actual matching operation"""
net = self._net
head = self.context.head
pot = self.context.traverse(t, 'path')
path_lookup = {}
for term, ind in pot:
var_val = net.get(VAR, None)
val = net.get(head(term), None)
if val is not None:
net = val
if var_val is not None:
path_lookup[ind] = term
continue
if var_val is not None:
net = var_val
pot.skip()
path_lookup[ind] = term
continue
return [], {}
return net, path_lookup
def _process_match(pat, cache):
path_lookup = pat._path_lookup
subs = {}
for var, paths in path_lookup.items():
subs[var] = first = cache[paths[0]]
for p in paths[1:]:
new = cache[p]
if new != first:
return None
return subs
# All functionality below here is used for compilation of a set of patterns
# into a deterministic matching automata, as described in:
#
# Nedjah, Nadia. "Minimal deterministic left-to-right pattern-matching
# automata." ACM Sigplan Notices 33.1 (1998): 40-47.
class MSet(tuple):
"""A set of `MItem`s"""
def __new__(cls, items):
return tuple.__new__(cls, (items,))
def __str__(self):
data = ",\n".join(str(i) for i in self.items)
return "MSet([\n{0}])".format(data)
@property
def items(self):
return self[0]
def is_equivalent(self, other):
"""Determines if two matching sets are equivalent"""
has_pair = []
# First loop through self, find equivalent match in other, and put in
# `has_pair` If no match exists, return False.
for i1 in self.items:
for i2 in other.items:
if i1 == i2:
has_pair.append(i2)
break
else:
return False
# Then for every item in other, ensure it has been placed at least once
# in `has_pair`. If it hasn't, return `False`.
for i2 in other.items:
if i2 not in has_pair:
return False
return True
class MItem(tuple):
"""Represents a single item in a matching set."""
def __new__(cls, suffix, rule):
return tuple.__new__(cls, (suffix, rule))
@property
def suffix(self):
return self[0]
@property
def rule(self):
return self[1]
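# Small illustrative sketch (suffixes and rule ids are placeholders): MSet
# equivalence is order-insensitive equality of the contained MItems.
def _demo_mset_equivalence():
    a = MSet([MItem(((VAR, 0),), 0), MItem((), 1)])
    b = MSet([MItem((), 1), MItem(((VAR, 0),), 0)])
    return a.is_equivalent(b)        # expected: True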
def flatten_with_arity(context, pattern):
"""term -> [(term, arity), ...]"""
vars = pattern.vars
def _helper(pot):
for t, a in pot:
if t in vars:
yield VAR, a
else:
yield context.head(t), a
return list(_helper(context.traverse(pattern.pat, 'arity')))
def next_terms(M):
"""Set of then next term after the matching position of an MSet"""
return set(pat.suffix[0] for pat in M.items if pat.suffix)
def match_on(term, *items):
"""Determine if the matching point of term is in items"""
return term.suffix and term.suffix[0] in items
def delta(context, M, s):
"""The transition function"""
set1 = [MItem(p.suffix[1:], p.rule) for p in M.items if match_on(p, s, VAR)]
var_next = [p for p in set1 if match_on(p, (VAR, 0))]
set2 = []
for var_pat in var_next:
for con_pat in set1:
func, arity = con_pat.suffix[0]
if func is VAR:
continue
suffix = [(func, arity)] + [(VAR, 0)]*arity + var_pat.suffix[1:]
new = MItem(suffix, var_pat.rule)
set2.append(new)
return MSet(set1 + set2)
def build_automata(context, patterns):
"""Construct the deterministic automata"""
temp = (flatten_with_arity(context, p) for p in patterns)
L = [MSet([MItem(p, i) for (i, p) in enumerate(temp)])]
paths = [{}]
for ind, mset in enumerate(L):
for t in next_terms(mset):
new = delta(context, mset, t)
if new:
for new_ind, match_set in enumerate(L):
if match_set.is_equivalent(new):
break
else:
L.append(new)
paths.append({})
new_ind = len(L) - 1
# TODO: setting for varargs to include (sym, arity) as key?
paths[ind][t[0]] = new_ind
# Replace leaf dicts with sets of the matching patterns
for i, lk in enumerate(paths):
if lk == {}:
paths[i] = tuple(sorted(set(m.rule for m in L[i].items)))
# Finalize the automata
for lk in paths:
if isinstance(lk, dict):
for k, v in lk.items():
lk[k] = paths[v]
return paths[0]
| {
"repo_name": "jcrist/pinyon",
"path": "pinyon/matching/static.py",
"copies": "1",
"size": "5950",
"license": "bsd-3-clause",
"hash": 7259310712229119000,
"line_mean": 28.4554455446,
"line_max": 80,
"alpha_frac": 0.5425210084,
"autogenerated": false,
"ratio": 3.8312942691564715,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9872384578931053,
"avg_score": 0.0002861397250836195,
"num_lines": 202
} |
from __future__ import absolute_import, division, print_function
from cs231n.layers import *
from cs231n.fast_layers import *
def affine_relu_forward(x, w, b):
"""
    Convenience layer that performs an affine transform followed by a
ReLU
Inputs:
- x: Input to the affine layer
- w, b: Weights for the affine layer
Returns a tuple of:
- out: Output from the ReLU
- cache: Object to give to the backward pass
"""
a, fc_cache = affine_forward(x, w, b)
out, relu_cache = relu_forward(a)
cache = (fc_cache, relu_cache)
return out, cache
def affine_relu_backward(dout, cache):
"""
Backward pass for the affine-relu convenience layer
"""
fc_cache, relu_cache = cache
da = relu_backward(dout, relu_cache)
dx, dw, db = affine_backward(da, fc_cache)
return dx, dw, db
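# Hedged sketch (not part of the original assignment code; shapes are assumed):
# chains the affine-relu convenience layer forward and backward on random data.
def _demo_affine_relu():
    import numpy as np
    x = np.random.randn(4, 6)                      # (N, D)
    w, b = np.random.randn(6, 5), np.zeros(5)      # (D, M), (M,)
    out, cache = affine_relu_forward(x, w, b)
    dx, dw, db = affine_relu_backward(np.ones_like(out), cache)
    return dx.shape, dw.shape, db.shape            # expected: (4, 6), (6, 5), (5,)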
def conv_relu_forward(x, w, b, conv_param):
"""
A convenience layer that performs a convolution followed by a
ReLU.
Inputs:
- x: Input to the convolutional layer
- w, b, conv_param: Weights and parameters for the convolutional
layer
Returns a tuple of:
- out: Output from the ReLU
- cache: Object to give to the backward pass
"""
a, conv_cache = conv_forward_fast(x, w, b, conv_param)
out, relu_cache = relu_forward(a)
cache = (conv_cache, relu_cache)
return out, cache
def conv_relu_backward(dout, cache):
"""
Backward pass for the conv-relu convenience layer.
"""
conv_cache, relu_cache = cache
da = relu_backward(dout, relu_cache)
dx, dw, db = conv_backward_fast(da, conv_cache)
return dx, dw, db
def conv_relu_pool_forward(x, w, b, conv_param, pool_param):
"""
Convenience layer that performs a convolution, a ReLU, and a pool.
Inputs:
- x: Input to the convolutional layer
- w, b, conv_param: Weights and parameters for the convolutional
layer
- pool_param: Parameters for the pooling layer
Returns a tuple of:
- out: Output from the pooling layer
- cache: Object to give to the backward pass
"""
a, conv_cache = conv_forward_fast(x, w, b, conv_param)
s, relu_cache = relu_forward(a)
out, pool_cache = max_pool_forward_fast(s, pool_param)
cache = (conv_cache, relu_cache, pool_cache)
return out, cache
def conv_relu_pool_backward(dout, cache):
"""
Backward pass for the conv-relu-pool convenience layer
"""
conv_cache, relu_cache, pool_cache = cache
ds = max_pool_backward_fast(dout, pool_cache)
da = relu_backward(ds, relu_cache)
dx, dw, db = conv_backward_fast(da, conv_cache)
return dx, dw, db
| {
"repo_name": "deehzee/cs231n",
"path": "assignment2/cs231n/layer_utils.py",
"copies": "1",
"size": "2654",
"license": "mit",
"hash": -1755899769476504300,
"line_mean": 26.0816326531,
"line_max": 70,
"alpha_frac": 0.648455162,
"autogenerated": false,
"ratio": 3.338364779874214,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4486819941874214,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division, print_function)
from cStringIO import StringIO
from elftools.elf.constants import P_FLAGS, SH_FLAGS
from elftools.elf.elffile import ELFFile
from ret.ia32 import IA32Simulator
from ret.state import MemoryRange
from struct import unpack
from sys import argv, exit, stderr, stdout
from types import NoneType
class ElfMemoryRange(MemoryRange):
"""\
Characteristics of a range of memory allocated by an ELF object.
An instance of this class represents a range of memory defined from
start_address up to (but not including) end_address. The flags attribute
defines the permissions of the memory range (defined by the ELF segment header).
The section attribute is either None or the name of the ELF section associated
with the memory range.
"""
def __init__(self, start_address, end_address, flags, section=None):
"""\
ElfMemoryRange(start_address, end_address, flags, section=None)
-> ElfMemoryRange
Create a new ElfMemoryRange object with the specified attributes.
"""
super(ElfMemoryRange, self).__init__(start_address, end_address)
self.flags = flags
self.section = section
return
def _get_flags(self):
return self._flags
def _set_flags(self, flags):
if not isinstance(flags, int):
raise TypeError(
"flags must be an integer between 0 and 0xffffffff")
if not (0 <= flags <= 0xffffffff):
raise ValueError(
"flags must be an integer between 0 and 0xffffffff")
self._flags = flags
return
flags = property(
_get_flags, _set_flags, None,
"The permissions associated with the memory range.")
def _get_section(self):
return self._section
def _set_section(self, section):
if not isinstance(section, (basestring, NoneType)):
raise TypeError("section must be a string or None")
self._section = section
return
section = property(
_get_section, _set_section, None,
"The section associated with the memory range.")
def _to_tuple(self):
return (super(ElfMemoryRange, self)._to_tuple() +
(self._flags, self._section))
def __repr__(self):
return ("ElfMemoryRange(start_address=0x%x, end_address=0x%x, "
"flags=%s, section=%r)" %
(self.start_address, self.end_address,
decode_program_header_flags(self.flags), self.section))
class ElfMemoryMap(object):
"""\
The memory layout of an ELF object.
The ranges attribute is a sorted tuple of non-overlapping ElfMemoryRange
objects ordered by start_address. Adjacent memory ranges with the same
flags and section are always merged.
"""
def __init__(self):
"""\
Create a new, empty ElfMemoryMap object.
"""
super(ElfMemoryMap, self).__init__()
self._ranges = [] # Always sorted
return
@property
def ranges(self):
return tuple(self._ranges)
@property
def start_address(self):
return self._ranges[0].start_address
@property
def end_address(self):
return self._ranges[-1].end_address
def add(self, new_range):
"""\
mmap.add(new_range)
        Add an ElfMemoryRange to this memory map, fracturing and merging the memory
map as necessary.
"""
if not isinstance(new_range, ElfMemoryRange):
raise TypeError("new_range must be an ElfMemoryRange instance")
assert new_range.end_address > new_range.start_address
i = 0
while i < len(self._ranges):
mem_range = self._ranges[i]
assert mem_range.end_address > mem_range.start_address
if mem_range.end_address <= new_range.start_address:
# No interaction; continue on
i += 1
continue
# We need to fracture this memory range.
if mem_range.start_address < new_range.start_address:
# Overlap starts after the beginning. Break mem_range into
# two pieces.
low_range = ElfMemoryRange(
start_address=mem_range.start_address,
end_address=new_range.start_address,
flags=mem_range.flags,
section=mem_range.section)
high_range = ElfMemoryRange(
start_address=new_range.start_address,
end_address=mem_range.end_address,
flags=mem_range.flags,
section=mem_range.section)
self._ranges[i:i+1] = [low_range, high_range]
# Continue on; the next run through the loop will have
# mem_range == high_range
i += 1
continue
# Overlap starts at the beginning.
if mem_range.end_address > new_range.end_address:
# new_range occupies only a portion of mem_range starting
# at the beginning. Swap the roles of mem_range and new_range
self._ranges[i] = new_range
new_range, mem_range = mem_range, new_range
# mem_range contains new_range entirely.
# Union mem_range's flags.
mem_range.flags |= new_range.flags
# Make sure the section is compatible.
if mem_range.section is None:
mem_range.section = new_range.section
elif new_range.section is not None and new_range.section != mem_range.section:
raise ValueError("Cannot merge %r and %r: incompatible "
"sections" % (mem_range, new_range))
            # Adjust the new segment's start to the end of this memory range,
            # but make sure we don't create a negative or zero-sized range.
if mem_range.end_address >= new_range.end_address:
new_range = None
break
new_range.start_address = mem_range.end_address
i += 1
# end while
if new_range is not None:
# New new_range to add to end of list.
self._ranges.append(new_range)
# Fix up any adjacencies which would otherwise be redundant.
self._merge_adjacent_equal_ranges()
return
def _merge_adjacent_equal_ranges(self):
i = 0
while i < len(self._ranges) - 1:
cur_range = self._ranges[i]
next_range = self._ranges[i+1]
# Are these adjacent and equal?
if (cur_range.end_address == next_range.start_address and
cur_range.flags == next_range.flags and
cur_range.section == next_range.section):
# Yep; merge them and remove the next item
cur_range.end_address = next_range.end_address
del self._ranges[i+1]
# Nope; just move on.
else:
i += 1
return
def __repr__(self):
result = StringIO()
if len(self._ranges) == 0:
return ""
last_end_address = self._ranges[0].start_address
for range in self._ranges:
if last_end_address != range.start_address:
result.write("%08x\n" % (last_end_address + 1,))
result.write("%08x %-20s %s" % (
range.start_address, decode_program_header_flags(range.flags),
range.section if range.section is not None else ""))
last_end_address = range.end_address
result.write("\n")
result.write("%08x" % (last_end_address,))
return result.getvalue()
# end ElfMemoryMap
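# Hedged usage sketch (addresses and flags are made up): overlapping ranges are
# fractured, their permission flags are OR'd together, and adjacent ranges with
# equal flags and section are merged back into a single range.
def _demo_elf_memory_map():
    mmap = ElfMemoryMap()
    mmap.add(ElfMemoryRange(0x1000, 0x2000, P_FLAGS.PF_R))
    mmap.add(ElfMemoryRange(0x1800, 0x2800, P_FLAGS.PF_R))
    return mmap.ranges               # expected: one merged range 0x1000-0x2800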
class RetkitElfDocument(object):
def __init__(self, filename=None, object_filename=None):
self.filename = filename
self.simulator = None
self.object_filename = None
self.object_data = None
self.object_model = None
self.memory_map = None
self.memory_layout = None
self.functions = []
self.globals = []
self.types = []
if object_filename is not None:
self.load_object_file(object_filename)
return
def load_object_file(self, object_filename):
with open(object_filename, "rb") as fd:
self.object_filename = object_filename
self.object_data = fd.read()
stream = StringIO(self.object_data)
self.object_model = ELFFile(stream)
# FIXME: Perform discovery of other simulators.
if self.object_model['e_machine'] in ("EM_386", "EM_486"):
sim_cls = IA32Simulator
else:
raise ValueError("Unhandled machine: %s" %
self.object_model['e_machine'])
self.create_memory_layout()
mem_layout = self.memory_layout.getvalue()
for i in xrange(0x26e8, 0x2700):
print(hex(i), hex(ord(mem_layout[i])))
self.simulator = sim_cls(bits=mem_layout)
self.load_symbol_table()
# end load_object_file
def create_memory_layout(self):
# Figure out segment permissions for mapping to memory.
self.memory_map = ElfMemoryMap()
for segment in self.object_model.iter_segments():
phdr = segment.header
if phdr.p_memsz <= 0:
continue
self.memory_map.add(
ElfMemoryRange(start_address=phdr.p_vaddr,
end_address=phdr.p_vaddr + phdr.p_memsz,
flags=phdr.p_flags))
# Map sections to segments and create a virtual memory layout.
self.memory_layout = StringIO()
for section in self.object_model.iter_sections():
shdr = section.header
if (shdr.sh_flags & SH_FLAGS.SHF_ALLOC == 0 or
shdr.sh_size == 0):
continue
self.memory_map.add(
ElfMemoryRange(start_address=shdr.sh_addr,
end_address=shdr.sh_addr + shdr.sh_size,
flags=0,
section=section.name))
self.memory_layout.seek(shdr.sh_addr)
self.memory_layout.write(section.data())
return
def load_symbol_table(self):
symtab = self.object_model.get_section_by_name(".symtab")
if symtab is None:
return
for sym in symtab.iter_symbols():
symhdr = sym.entry
if symhdr.st_info['type'] != "STT_FUNC":
continue
name = (sym.name if sym.name is not None
else "func_%x" % symhdr.st_value)
self.functions.append(self.simulator.create_function(
name=name,
start_address=symhdr.st_value,
end_address=symhdr.st_value + symhdr.st_size))
self.functions.sort(key=lambda fn: fn.start_address)
return
# end RetkitElfDocument
_section_flags_to_strings = dict(
[(v, k) for k, v in SH_FLAGS.__dict__.iteritems() if k.startswith("SHF_")])
def decode_section_flags(sh_flags):
return _decode_flags(_section_flags_to_strings, sh_flags)
_program_header_flags_to_strings = dict(
[(v, k) for k, v in P_FLAGS.__dict__.iteritems() if k.startswith("PF_")])
def decode_program_header_flags(p_flags):
return _decode_flags(_program_header_flags_to_strings, p_flags)
def _decode_flags(flag_dict, flags):
result = []
for mask, name in sorted(flag_dict.items()):
if flags & mask != 0:
result.append(name)
flags &= ~mask
if flags != 0:
result.append("0x%x" % flags)
if len(result) == 0:
return "0"
return " | ".join(result)
def usage(fd=stderr):
print("""\
Usage: python -m ret.elf <filename>
Disassemble the specified ELF file.
""", file=stderr)
return
def main(args):
from getopt import getopt, GetoptError
try:
opts, args = getopt(args, "h", ["help"])
except GetoptError as e:
stderr.write(str(e) + "\n")
usage()
return 1
for opt, value in opts:
if opt in ("-h", "--help"):
usage(stdout)
return 0
if len(args) == 0:
print("Filename not specified.", file=stderr)
usage()
return 1
    RetkitElfDocument(object_filename=args[0])
return 0
if __name__ == "__main__":
exit(main(argv[1:]))
# Local variables:
# mode: Python
# tab-width: 8
# indent-tabs-mode: nil
# End:
# vi: set expandtab tabstop=8
| {
"repo_name": "dacut/ret",
"path": "ret/elf.py",
"copies": "1",
"size": "12636",
"license": "bsd-2-clause",
"hash": -4743343774672632000,
"line_mean": 32.0785340314,
"line_max": 90,
"alpha_frac": 0.5739157961,
"autogenerated": false,
"ratio": 4.005071315372425,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0036557402546880105,
"num_lines": 382
} |
from __future__ import absolute_import, division, print_function
from databroker import DataBroker as db, get_images, get_table, get_events
from filestore.api import register_handler, deregister_handler
from filestore.retrieve import _h_registry, _HANDLER_CACHE
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm
import skxray.core.roi as roi
from datetime import datetime
import logging
import time
from math import isnan
import h5py
from filestore.retrieve import HandlerBase
from eiger_io.pims_reader import EigerImages
EIGER_MD_DICT = {
'y_pixel_size': 'entry/instrument/detector/y_pixel_size',
'x_pixel_size': 'entry/instrument/detector/x_pixel_size',
'detector_distance': 'entry/instrument/detector/detector_distance',
'incident_wavelength': 'entry/instrument/beam/incident_wavelength',
'frame_time': 'entry/instrument/detector/frame_time',
'beam_center_x': 'entry/instrument/detector/beam_center_x',
'beam_center_y': 'entry/instrument/detector/beam_center_y',
'count_time': 'entry/instrument/detector/count_time',
'pixel_mask': 'entry/instrument/detector/detectorSpecific/pixel_mask',
}
class FixedEigerImages(EigerImages):
def __init__(self, path, metadata):
super().__init__(path)
self._metadata = metadata
@property
def md(self):
return self._metadata
@property
def dtype(self):
return self.pixel_type
@property
def shape(self):
return self.frame_shape
class LazyEigerHandler(HandlerBase):
specs = {'AD_EIGER'} | HandlerBase.specs
def __init__(self, fpath, frame_per_point, mapping=None):
# create pims handler
self.vals_dict = EIGER_MD_DICT.copy()
if mapping is not None:
self.vals_dict.update(mapping)
self._base_path = fpath
self.fpp = frame_per_point
def __call__(self, seq_id):
import h5py
master_path = '{}_{}_master.h5'.format(self._base_path, seq_id)
md = {}
print('hdf5 path = %s' % master_path)
with h5py.File(master_path, 'r') as f:
md = {k: f[v].value for k, v in self.vals_dict.items()}
# the pixel mask from the eiger contains:
# 1 -- gap
# 2 -- dead
# 4 -- under-responsive
# 8 -- over-responsive
# 16 -- noisy
pixel_mask = md['pixel_mask']
pixel_mask[pixel_mask>0] = 1
pixel_mask[pixel_mask==0] = 2
pixel_mask[pixel_mask==1] = 0
pixel_mask[pixel_mask==2] = 1
md['framerate'] = 1./md['frame_time']
# TODO Return a multi-dimensional PIMS seq
return FixedEigerImages(master_path, md)
deregister_handler('AD_EIGER')
_HANDLER_CACHE.clear()
register_handler('AD_EIGER', LazyEigerHandler)
def print_attrs(name, obj):
print(name)
for key, val in obj.attrs.items():
print(" %s: %s" % (key, val))
class Reverse_Coordinate(object):
def __init__(self, indexable, mask):
self.indexable = indexable
self.mask = mask
self.shape = indexable.shape
self.length= len(indexable)
def __getitem__(self, key ):
if self.mask is not None:
img =self.indexable[key] * self.mask
else:
img = self.indexable[key]
if len(img.shape) ==3:
img_=img[:,::-1,:]
if len(img.shape)==2:
img_=img[::-1,:]
return img_
class RemoveHotSpots(object):
def __init__(self, indexable, threshold= 1E7 ):
self.indexable = indexable
self.threshold = threshold
try:
self.N = len( indexable )
except:
self.N= indexable.length
def _get_mask(self, Ns=None,Ne=None ):
mask = np.ones_like(np.array(self.indexable[0]))
if Ns is None:Ns=0
if Ne is None:Ne=self.N
#for key in range(self.N):
for key in range( Ns,Ne ):
data = np.array( self.indexable[key]) #.copy()
badp = np.where( data >= self.threshold )
if len(badp[0])!=0:
mask[badp] = 0
return mask
    def __getitem__(self, key):
        if not hasattr(self, '_mask'):
            # cache the hot-pixel mask (computed over the whole series) on first access
            self._mask = self._get_mask()
        return self.indexable[key] * self._mask
class Masker(object):
def __init__(self, indexable, mask):
self.indexable = indexable
self.mask = mask
self.length = len( indexable)
def __getitem__(self, key):
img =self.indexable[key] * self.mask
return img
def view_image(imgsr,i):
#from ipywidgets import interact
fig, ax = plt.subplots()
ax.imshow(imgsr[i], interpolation='nearest', cmap='viridis',
origin='lower', norm= LogNorm(vmin=0.001, vmax=1e1 ) )
ax.set_title("Browse the Image Stack")
plt.show()
import time
def view_image_movie(imgsr,sleeps=1, ims=0, ime = 1):
fig, ax = plt.subplots()
for i in range( ims, ime ):
ax.imshow(imgsr[i], interpolation='nearest', cmap='viridis',
origin='lower', norm= LogNorm( vmin=0.001, vmax=1e1 ) )
ax.set_title("images_%s"%i)
time.sleep( sleeps )
plt.draw()
if i!=ime-1:
ax.cla()
def average_img( imgs, Ns=None,Ne = None ):
    '''Average a stack of images.
    Options:
        imgs: the image series
        Ns: index of the first image (defaults to 0)
        Ne: index past the last image (defaults to len(imgs))
    e.g.,
        ave = average_img(imgs)'''
    import numpy as np
    ave = np.zeros_like(imgs[0], dtype=float)
    if Ns is None: Ns = 0
    if Ne is None: Ne = len(imgs)
    if Ne > len(imgs): Ne = len(imgs)
    for i in range(Ns, Ne):
ave += imgs[i]
ave /= (Ne-Ns)
return ave
import xray_vision
import xray_vision.mpl_plotting as mpl_plot
from xray_vision.mpl_plotting import speckle
from xray_vision.mask.manual_mask import ManualMask
import skxray.core.roi as roi
import skxray.core.correlation as corr
import skxray.core.utils as utils
def run_time(t0):
'''Calculate running time of a program
Parameters
----------
t0: time_string, t0=time.time()
The start time
Returns
-------
Print the running time
One usage
---------
t0=time.time()
.....(the running code)
run_time(t0)
'''
elapsed_time = time.time() - t0
print ('Total time: %.2f min' %(elapsed_time/60.))
def cpopen( filename=None, inDir=None, ):
import _pickle as cPickle
import os
if inDir!=None:filename=inDir + filename
if os.path.isfile(filename):
#fp=file(filename,'rb')
fp=open(filename,'rb')
data = cPickle.load(fp)
fp.close()
return data
else:
return None
##for radial average intensity
def circular_average(image, calibrated_center, threshold=-1, nx=None,
pixel_size=None, mask=None):
"""Circular average of the the image data
The circular average is also known as the radial integration
Parameters
----------
image : array
Image to compute the average as a function of radius
calibrated_center : tuple
The center of the image in pixel units
argument order should be (row, col)
threshold : int, optional
Ignore counts above `threshold`
nx : int, optional
Number of bins in R. Defaults to 100
pixel_size : tuple, optional
The size of a pixel (in a real unit, like mm).
argument order should be (pixel_height, pixel_width)
Returns
-------
bin_centers : array
The center of each bin in R. shape is (nx, )
ring_averages : array
Radial average of the image. shape is (nx, ).
"""
radial_val = utils.radial_grid(calibrated_center, image.shape, pixel_size )
    if nx is None:
        ps = np.min(pixel_size) if pixel_size is not None else 1
        max_x = np.max(radial_val) / ps
        min_x = np.min(radial_val) / ps
        nx = int(max_x - min_x)
#print (nx)
if mask is None: mask =1
bin_edges, sums, counts = bin_1D(np.ravel(radial_val * mask ),
np.ravel(image * mask), nx)
th_mask = counts > threshold
ring_averages = sums[th_mask] / counts[th_mask]
bin_centers = utils.bin_edges_to_centers(bin_edges)[th_mask]
return bin_centers, ring_averages
def bin_1D(x, y, nx=None, min_x=None, max_x=None):
"""
Bin the values in y based on their x-coordinates
Parameters
----------
x : array
position
y : array
intensity
nx : integer, optional
number of bins to use defaults to default bin value
min_x : float, optional
Left edge of first bin defaults to minimum value of x
max_x : float, optional
Right edge of last bin defaults to maximum value of x
Returns
-------
edges : array
edges of bins, length nx + 1
val : array
sum of values in each bin, length nx
count : array
The number of counts in each bin, length nx
"""
# handle default values
if min_x is None:
min_x = np.min(x)
if max_x is None:
max_x = np.max(x)
if nx is None:
nx = int(max_x - min_x)
# use a weighted histogram to get the bin sum
bins = np.linspace(start=min_x, stop=max_x, num=nx+1, endpoint=True)
val, _ = np.histogram(a=x, bins=bins, weights=y)
# use an un-weighted histogram to get the counts
count, _ = np.histogram(a=x, bins=bins)
# return the three arrays
return bins, val, count
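# Minimal sketch of bin_1D on synthetic data (numbers chosen only for illustration):
def _demo_bin_1D():
    xx = np.array([0.1, 0.4, 0.6, 0.9])
    yy = np.array([1.0, 2.0, 3.0, 4.0])
    edges, sums, counts = bin_1D(xx, yy, nx=2, min_x=0.0, max_x=1.0)
    return sums, counts              # expected: array([3., 7.]), array([2, 2])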
#GiSAXS
##########################################
def make_gisaxs_grid( qr_w= 10, qz_w = 12, dim_r =100,dim_z=120):
y, x = np.indices( [dim_z,dim_r] )
    Nr = int(dim_r/qr_w)
Nz = int(dim_z/qz_w)
noqs = Nr*Nz
ind = 1
for i in range(0,Nr):
for j in range(0,Nz):
y[ qr_w*i: qr_w*(i+1), qz_w*j:qz_w*(j+1)]= ind
ind += 1
return y
def get_incident_angles( inc_x0, inc_y0, refl_x0, refl_y0, pixelsize=[75,75], Lsd=5.0):
''' giving: incident beam center: bcenx,bceny
reflected beam on detector: rcenx, rceny
sample to detector distance: Lsd, in meters
pixelsize: 75 um for Eiger4M detector
        get incident_angle (alphai), the tilt angle (phi)
'''
px,py = pixelsize
phi = np.arctan2( (refl_x0 - inc_x0)*px *10**(-6), (refl_y0 - inc_y0)*py *10**(-6) )
alphai = np.arctan2( (refl_y0 -inc_y0)*py *10**(-6), Lsd ) /2.
#thetai = np.arctan2( (rcenx - bcenx)*px *10**(-6), Lsd ) /2. #??
return alphai,phi
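# Illustrative geometry (all numbers assumed): a reflected beam 100 pixels above
# the direct beam with 75 um pixels at Lsd = 5 m gives
# alphai = arctan(100 * 75e-6 / 5) / 2 ~ 7.5e-4 rad and phi = 0.
def _demo_incident_angles():
    alphai, phi = get_incident_angles(100, 100, 100, 200,
                                      pixelsize=[75, 75], Lsd=5.0)
    return alphai, phi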
def get_reflected_angles(inc_x0, inc_y0, refl_x0, refl_y0, thetai=0.0,
pixelsize=[75,75], Lsd=5.0,dimx = 2070.,dimy=2167.):
''' giving: incident beam center: bcenx,bceny
reflected beam on detector: rcenx, rceny
sample to detector distance: Lsd, in meters
pixelsize: 75 um for Eiger4M detector
detector image size: dimx = 2070,dimy=2167 for Eiger4M detector
get reflected angle alphaf (outplane)
reflected angle thetaf (inplane )
'''
alphai, phi = get_incident_angles( inc_x0, inc_y0, refl_x0, refl_y0, pixelsize, Lsd)
print ('The incident_angle (alphai) is: %s'%(alphai* 180/np.pi))
px,py = pixelsize
y, x = np.indices( [dimy,dimx] )
#alphaf = np.arctan2( (y-inc_y0)*py*10**(-6), Lsd )/2 - alphai
alphaf = np.arctan2( (y-inc_y0)*py*10**(-6), Lsd ) - alphai
thetaf = np.arctan2( (x-inc_x0)*px*10**(-6), Lsd )/2 - thetai
return alphaf,thetaf, alphai, phi
def convert_gisaxs_pixel_to_q( inc_x0, inc_y0, refl_x0, refl_y0,
pixelsize=[75,75], Lsd=5.0,dimx = 2070.,dimy=2167.,
thetai=0.0, lamda=1.0 ):
''' giving: incident beam center: bcenx,bceny
reflected beam on detector: rcenx, rceny
sample to detector distance: Lsd, in meters
pixelsize: 75 um for Eiger4M detector
detector image size: dimx = 2070,dimy=2167 for Eiger4M detector
wavelength: angstron
get: q_parallel (qp), q_direction_z (qz)
'''
alphaf,thetaf,alphai, phi = get_reflected_angles( inc_x0, inc_y0, refl_x0, refl_y0, thetai, pixelsize, Lsd,dimx,dimy)
pref = 2*np.pi/lamda
qx = np.cos( alphaf)*np.cos( 2*thetaf) - np.cos( alphai )*np.cos( 2*thetai)
qy_ = np.cos( alphaf)*np.sin( 2*thetaf) - np.cos( alphai )*np.sin ( 2*thetai)
qz_ = np.sin(alphaf) + np.sin(alphai)
qy = qz_* np.sin( phi) + qy_*np.cos(phi)
qz = qz_* np.cos( phi) - qy_*np.sin(phi)
qr = np.sqrt( qx**2 + qy**2 )
return qx*pref , qy*pref , qr*pref , qz*pref
def get_qedge( qstart,qend,qwidth,noqs, ):
    '''Given qstart, qend, qwidth and noqs (number of q rings), return:
        qedge: the interleaved inner/outer edges of each q ring
        qcenter: the center of each q ring
    '''
import numpy as np
qcenter = np.linspace(qstart,qend,noqs)
#print ('the qcenter is: %s'%qcenter )
qedge=np.zeros(2*noqs)
qedge[::2]= ( qcenter- (qwidth/2) ) #+1 #render even value
qedge[1::2]= ( qcenter+ qwidth/2) #render odd value
return qedge, qcenter
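# e.g. (illustrative numbers): three rings centered at 0.02, 0.04, 0.06 with
# width 0.01 give interleaved inner/outer edges
# [0.015, 0.025, 0.035, 0.045, 0.055, 0.065] and centers [0.02, 0.04, 0.06].
def _demo_get_qedge():
    return get_qedge(0.02, 0.06, 0.01, 3)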
def get_qmap_label( qmap, qedge ):
    '''give a qmap and qedge to bin the qmap into a label array'''
    import numpy as np
edges = np.atleast_2d(np.asarray(qedge)).ravel()
label_array = np.digitize(qmap.ravel(), edges, right=False)
label_array = np.int_(label_array)
label_array = (np.where(label_array % 2 != 0, label_array, 0) + 1) // 2
label_array = label_array.reshape( qmap.shape )
return label_array
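# Illustrative sketch: values inside the odd edge intervals get ring labels 1, 2, ...
# while values falling in the gaps between rings get label 0.
def _demo_get_qmap_label():
    qedge = [0.015, 0.025, 0.035, 0.045]
    return get_qmap_label(np.array([0.02, 0.03, 0.04]), qedge)   # expected: [1, 0, 2]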
def get_qzrmap(label_array_qz, label_array_qr, qz_center, qr_center ):
qzmax = label_array_qz.max()
label_array_qr_ = np.zeros( label_array_qr.shape )
ind = np.where(label_array_qr!=0)
label_array_qr_[ind ] = label_array_qr[ ind ] + 1E4 #add some large number to qr
label_array_qzr = label_array_qz * label_array_qr_
#convert label_array_qzr to [1,2,3,...]
uqzr = np.unique( label_array_qzr )[1:]
uqz = np.unique( label_array_qz )[1:]
uqr = np.unique( label_array_qr )[1:]
#print (uqzr)
label_array_qzr_ = np.zeros_like( label_array_qzr )
newl = np.arange( 1, len(uqzr)+1)
qzc =list(qz_center) * len( uqr )
qrc= [ [qr_center[i]]*len( uqz ) for i in range(len( uqr )) ]
for i, label in enumerate(uqzr):
#print (i, label)
label_array_qzr_.ravel()[ np.where( label_array_qzr.ravel() == label)[0] ] = newl[i]
return np.int_(label_array_qzr_), np.array( qzc ), np.concatenate(np.array(qrc ))
def get_qr_intensity_series( qr, data,vert_rect, mask=None,show_roi=True ):
V_K_label_array = roi.rectangles(vert_rect, data.shape) #(y,x, hight, wdith)
if mask is not None:V_K_label_array =V_K_label_array * mask
qr_ = qr *V_K_label_array
data_ = data*V_K_label_array
if False:
fig, ax = plt.subplots()
im = plt.imshow(data_,origin='lower',norm= LogNorm( vmin=.1, vmax=1e0 ) )
fig.colorbar(im)
plt.show()
data_ave = np.average( data_, axis=0)
qr_ave = np.average( qr_, axis=0)
if show_roi:
fig, ax = plt.subplots()
im = plt.imshow(data_,origin='lower',norm= LogNorm( vmin=.1, vmax=1e0 ) )
fig.colorbar(im)
plt.show()
def get_qr_intensity( qr, data,vert_rect,mask=None, show_roi=True ):
V_K_label_array = roi.rectangles(vert_rect, data.shape) #(y,x, hight, wdith)
if mask is not None:V_K_label_array =V_K_label_array * mask
if show_roi:
data_ = data*V_K_label_array
fig, ax = plt.subplots()
im = plt.imshow(data_,origin='lower',norm= LogNorm( vmin=.1, vmax=1e0 ) )
fig.colorbar(im)
plt.show()
fig, ax = plt.subplots()
for i, vr in enumerate( vert_rect):
print (i, vr)
V_K_label_array_i = roi.rectangles((vr,), data.shape) #(y,x, hight, wdith)
if mask is not None:V_K_label_array_i =V_K_label_array_i * mask
roi_pixel_num = np.sum( V_K_label_array_i, axis=0)
qr_ = qr *V_K_label_array_i
data_ = data*V_K_label_array_i
qr_ave = np.sum( qr_, axis=0)/roi_pixel_num
data_ave = np.sum( data_, axis=0)/roi_pixel_num
ax.plot( qr_ave, data_ave, '--o', label= 'interest_roi_%i'%i)
ax.set_xlabel( r'$q_r$', fontsize=15)
ax.set_yscale('log')
ax.set_xscale('log')
ax.legend()
def get_qr_tick_label( qr, label_array_qr, inc_x0):
rticks =[]
rticks_label = []
num = len( np.unique( label_array_qr ) )
for i in range( 1, num ):
ind = np.where( label_array_qr==i )[1]
tick = round( qr[label_array_qr==i].mean(),2)
if ind[0] < inc_x0 and ind[-1]>inc_x0:
mean1 = int( (ind[np.where(ind < inc_x0)[0]]).mean() )
mean2 = int( (ind[np.where(ind > inc_x0)[0]]).mean() )
rticks.append( mean1)
rticks.append(mean2)
rticks_label.append( tick )
rticks_label.append( tick )
else:
mean = int( ind.mean() )
rticks.append(mean)
rticks_label.append( tick )
#print (rticks)
return np.array(rticks), np.array(rticks_label)
def get_qz_tick_label( qz, label_array_qz):
num = len( np.unique( label_array_qz ) )
zticks = np.array( [ int( np.where( label_array_qz==i )[0].mean() ) for i in range( 1,num ) ])
zticks_label = np.array( [ round( qz[label_array_qz==i].mean(),2) for i in range( 1, num ) ])
return zticks,zticks_label
def show_qzr_map( qr, qz, inc_x0, data=None, Nzline=10,Nrline=10 ):
import matplotlib.pyplot as plt
import copy
import matplotlib.cm as mcm
cmap='viridis'
_cmap = copy.copy((mcm.get_cmap(cmap)))
_cmap.set_under('w', 0)
qr_start, qr_end, qr_num = qr.min(),qr.max(), Nzline
qz_start, qz_end, qz_num = qz.min(),qz.max(), Nrline
qr_edge, qr_center = get_qedge(qr_start , qr_end, ( qr_end- qr_start)/(qr_num+100), qr_num )
qz_edge, qz_center = get_qedge( qz_start, qz_end, (qz_end - qz_start)/(qz_num+100 ) , qz_num )
label_array_qz = get_qmap_label( qz, qz_edge)
label_array_qr = get_qmap_label( qr, qr_edge)
labels_qz, indices_qz = roi.extract_label_indices( label_array_qz )
labels_qr, indices_qr = roi.extract_label_indices( label_array_qr )
num_qz = len(np.unique( labels_qz ))
num_qr = len(np.unique( labels_qr ))
fig, ax = plt.subplots()
if data is None:
data=qr+qz
im = ax.imshow(data, cmap='viridis',origin='lower')
else:
im = ax.imshow(data, cmap='viridis',origin='lower', norm= LogNorm(vmin=0.001, vmax=1e1))
imr=ax.imshow(label_array_qr, origin='lower' ,cmap='viridis', vmin=0.5,vmax= None )#,interpolation='nearest',)
imz=ax.imshow(label_array_qz, origin='lower' ,cmap='viridis', vmin=0.5,vmax= None )#,interpolation='nearest',)
caxr = fig.add_axes([0.81, 0.1, 0.03, .8]) #x,y, width, heigth
cba = fig.colorbar(im, cax=caxr )
ax.set_xlabel(r'$q_r$', fontsize=18)
ax.set_ylabel(r'$q_z$',fontsize=18)
zticks,zticks_label = get_qz_tick_label(qz,label_array_qz)
#rticks,rticks_label = get_qr_tick_label(label_array_qr,inc_x0)
rticks,rticks_label = zip(*sorted( zip( *get_qr_tick_label( qr, label_array_qr, inc_x0) )) )
ax.set_yticks( zticks[::1] )
yticks = zticks_label[::1]
ax.set_yticklabels(yticks, fontsize=9)
    stride = max(1, int(len(rticks)/7))
ax.set_xticks( rticks[::stride] )
xticks = rticks_label[::stride]
ax.set_xticklabels(xticks, fontsize=9)
ax.set_title( 'Q-zr_Map', y=1.03,fontsize=18)
plt.show()
#GiSAXS End
###############################
def show_label_array_on_image(ax, image, label_array, cmap=None,norm=None, log_img=True,
imshow_cmap='gray', **kwargs): #norm=LogNorm(),
"""
This will plot the required ROI's(labeled array) on the image
Additional kwargs are passed through to `ax.imshow`.
If `vmin` is in kwargs, it is clipped to minimum of 0.5.
Parameters
----------
ax : Axes
The `Axes` object to add the artist too
image : array
The image array
label_array : array
Expected to be an unsigned integer array. 0 is background,
positive integers label region of interest
cmap : str or colormap, optional
Color map to use for plotting the label_array, defaults to 'None'
imshow_cmap : str or colormap, optional
Color map to use for plotting the image, defaults to 'gray'
norm : str, optional
Normalize scale data, defaults to 'Lognorm()'
Returns
-------
im : AxesImage
The artist added to the axes
im_label : AxesImage
The artist added to the axes
"""
ax.set_aspect('equal')
if log_img:
im = ax.imshow(image, cmap=imshow_cmap, interpolation='none',norm=LogNorm(norm),**kwargs) #norm=norm,
else:
im = ax.imshow(image, cmap=imshow_cmap, interpolation='none',norm=norm,**kwargs) #norm=norm,
im_label = mpl_plot.show_label_array(ax, label_array, cmap=cmap, norm=norm,
**kwargs) # norm=norm,
return im, im_label
#import numpy as np
#from . import utils as core
#from . import roi
from lmfit import minimize, Model, Parameters
logger = logging.getLogger(__name__)
def multi_tau_auto_corr(num_levels, num_bufs, labels, images):
"""
This function computes one-time correlations.
It uses a scheme to achieve long-time correlations inexpensively
by downsampling the data, iteratively combining successive frames.
The longest lag time computed is num_levels * num_bufs.
Parameters
----------
num_levels : int
how many generations of downsampling to perform, i.e.,
the depth of the binomial tree of averaged frames
num_bufs : int, must be even
maximum lag step to compute in each generation of
downsampling
labels : array
labeled array of the same shape as the image stack;
each ROI is represented by a distinct label (i.e., integer)
images : iterable of 2D arrays
dimensions are: (rr, cc)
Returns
-------
g2 : array
matrix of normalized intensity-intensity autocorrelation
shape (num_levels, number of labels(ROI))
lag_steps : array
delay or lag steps for the multiple tau analysis
shape num_levels
Notes
-----
The normalized intensity-intensity time-autocorrelation function
is defined as
    .. math::
        g_2(q, t') = \frac{\langle I(q, t) I(q, t + t') \rangle}{\langle I(q, t) \rangle^2},
        \quad t' > 0
Here, I(q, t) refers to the scattering strength at the momentum
transfer vector q in reciprocal space at time t, and the brackets
<...> refer to averages over time t. The quantity t' denotes the
delay time
This implementation is based on code in the language Yorick
by Mark Sutton, based on published work. [1]_
References
----------
.. [1] D. Lumma, L. B. Lurio, S. G. J. Mochrie and M. Sutton,
"Area detector based photon correlation in the regime of
short data batches: Data reduction for dynamic x-ray
scattering," Rev. Sci. Instrum., vol 70, p 3274-3289, 2000.
"""
    # In order to calculate correlations for `num_bufs`, images must be
    # kept for up to the maximum lag step. These are stored in the array
    # buffer. Rather than keeping every frame, only `num_bufs` frames are
    # stored per level, and `num_levels` such levels are kept in `buf`.
    # Each level has twice the delay times of the next lower one. To avoid
    # needless copying, cyclic (ring-buffer) storage of images in `buf` is used.
    if num_bufs % 2 != 0:
        raise ValueError("number of channels (number of buffers) in "
                         "multiple-tau correlation must be even")
if hasattr(images, 'frame_shape'):
# Give a user-friendly error if we can detect the shape from pims.
if labels.shape != images.frame_shape:
raise ValueError("Shape of the image stack should be equal to"
" shape of the labels array")
# get the pixels in each label
label_mask, pixel_list = roi.extract_label_indices(labels)
num_rois = np.max(label_mask)
# number of pixels per ROI
num_pixels = np.bincount(label_mask, minlength=(num_rois+1))
num_pixels = num_pixels[1:]
if np.any(num_pixels == 0):
raise ValueError("Number of pixels of the required roi's"
" cannot be zero, "
"num_pixels = {0}".format(num_pixels))
    # G holds the un-normalized auto-correlation result. We
    # accumulate computations into G as the algorithm proceeds.
    # Note: integer (floor) division keeps the array shapes integral.
    G = np.zeros(((num_levels + 1)*num_bufs//2, num_rois),
                 dtype=np.float64)
    # matrix of past intensity normalizations
    past_intensity_norm = np.zeros(((num_levels + 1)*num_bufs//2, num_rois),
                                   dtype=np.float64)
    # matrix of future intensity normalizations
    future_intensity_norm = np.zeros(((num_levels + 1)*num_bufs//2, num_rois),
                                     dtype=np.float64)
    # Ring buffer, a buffer with periodic boundary conditions.
    # Images must be kept for up to the maximum delay in buf.
buf = np.zeros((num_levels, num_bufs, np.sum(num_pixels)),
dtype=np.float64)
# to track processing each level
track_level = np.zeros(num_levels)
# to increment buffer
cur = np.ones(num_levels, dtype=np.int64)
# to track how many images processed in each level
img_per_level = np.zeros(num_levels, dtype=np.int64)
start_time = time.time() # used to log the computation time (optionally)
for n, img in enumerate(images):
cur[0] = (1 + cur[0]) % num_bufs # increment buffer
# Put the image into the ring buffer.
buf[0, cur[0] - 1] = (np.ravel(img))[pixel_list]
# Compute the correlations between the first level
# (undownsampled) frames. This modifies G,
# past_intensity_norm, future_intensity_norm,
# and img_per_level in place!
_process(buf, G, past_intensity_norm,
future_intensity_norm, label_mask,
num_bufs, num_pixels, img_per_level,
level=0, buf_no=cur[0] - 1)
# check whether the number of levels is one, otherwise
# continue processing the next level
processing = num_levels > 1
# Compute the correlations for all higher levels.
level = 1
while processing:
if not track_level[level]:
track_level[level] = 1
processing = False
else:
prev = 1 + (cur[level - 1] - 2) % num_bufs
cur[level] = 1 + cur[level] % num_bufs
buf[level, cur[level] - 1] = (buf[level - 1, prev - 1] +
buf[level - 1,
cur[level - 1] - 1])/2
# make the track_level zero once that level is processed
track_level[level] = 0
# call the _process function for each multi-tau level
# for multi-tau levels greater than one
# Again, this is modifying things in place. See comment
# on previous call above.
_process(buf, G, past_intensity_norm,
future_intensity_norm, label_mask,
num_bufs, num_pixels, img_per_level,
level=level, buf_no=cur[level]-1,)
level += 1
# Checking whether there is next level for processing
processing = level < num_levels
# ending time for the process
end_time = time.time()
logger.info("Processing time for {0} images took {1} seconds."
"".format(n, (end_time - start_time)))
# the normalization factor
if len(np.where(past_intensity_norm == 0)[0]) != 0:
g_max = np.where(past_intensity_norm == 0)[0][0]
else:
g_max = past_intensity_norm.shape[0]
# g2 is normalized G
g2 = (G[:g_max] / (past_intensity_norm[:g_max] *
future_intensity_norm[:g_max]))
# Convert from num_levels, num_bufs to lag frames.
tot_channels, lag_steps = core.multi_tau_lags(num_levels, num_bufs)
lag_steps = lag_steps[:g_max]
return g2, lag_steps
def _process(buf, G, past_intensity_norm, future_intensity_norm,
label_mask, num_bufs, num_pixels, img_per_level, level, buf_no):
"""
Internal helper function. This modifies inputs in place.
This helper function calculates G, past_intensity_norm and
future_intensity_norm at each level, symmetric normalization is used.
Parameters
----------
buf : array
image data array to use for correlation
G : array
matrix of auto-correlation function without
normalizations
past_intensity_norm : array
matrix of past intensity normalizations
future_intensity_norm : array
matrix of future intensity normalizations
label_mask : array
labels of the required region of interests(roi's)
num_bufs : int, even
number of buffers(channels)
    num_pixels : array
        number of pixels in each ROI;
        dimensions are [number of ROIs] x 1
img_per_level : array
to track how many images processed in each level
level : int
the current multi-tau level
buf_no : int
the current buffer number
Notes
-----
    .. math::
        G = \langle I(\tau) I(\tau + delay) \rangle
    .. math::
        past\_intensity\_norm = \langle I(\tau) \rangle
    .. math::
        future\_intensity\_norm = \langle I(\tau + delay) \rangle
"""
    img_per_level[level] += 1
    # in multi-tau correlation, levels other than the first only need to
    # compute the second half of the correlations
    if level == 0:
        i_min = 0
    else:
        i_min = num_bufs//2
    for i in range(i_min, min(img_per_level[level], num_bufs)):
        t_index = level*num_bufs//2 + i  # integer index into G and the norms
delay_no = (buf_no - i) % num_bufs
past_img = buf[level, delay_no]
future_img = buf[level, buf_no]
# get the matrix of auto-correlation function without normalizations
tmp_binned = (np.bincount(label_mask,
weights=past_img*future_img)[1:])
G[t_index] += ((tmp_binned / num_pixels - G[t_index]) /
(img_per_level[level] - i))
# get the matrix of past intensity normalizations
pi_binned = (np.bincount(label_mask,
weights=past_img)[1:])
past_intensity_norm[t_index] += ((pi_binned/num_pixels
- past_intensity_norm[t_index]) /
(img_per_level[level] - i))
# get the matrix of future intensity normalizations
fi_binned = (np.bincount(label_mask,
weights=future_img)[1:])
future_intensity_norm[t_index] += ((fi_binned/num_pixels
- future_intensity_norm[t_index]) /
(img_per_level[level] - i))
return None # modifies arguments in place!
def interp_zeros( data ):
    """Fill the zero entries of a 2D array by linear interpolation over its
    flattened nonzero values; note this modifies `data` in place when
    `ravel` returns a view."""
    from scipy.interpolate import interp1d
    gf = data.ravel()
    indice, = gf.nonzero()
start, stop = indice[0], indice[-1]+1
dx,dy = data.shape
x=np.arange( dx*dy )
f = interp1d(x[indice], gf[indice])
gf[start:stop] = f(x[start:stop])
return gf.reshape([dx,dy])
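# --- Illustrative usage sketch (added for this listing, not part of the original script) ---
# A minimal synthetic call of the multi_tau_auto_corr function defined above;
# the frame size, ROI layout and (num_levels, num_bufs) values are made up for
# the demo, and the sketch assumes numpy is available as `np` (as elsewhere in
# this file) and that skxray is importable, since the function imports it.
if __name__ == '__main__':
    _frames = [np.random.poisson(5, size=(16, 16)) for _ in range(32)]
    _labels = np.zeros((16, 16), dtype=np.int64)
    _labels[4:12, 4:12] = 1  # one square ROI labelled 1; background stays 0
    _g2, _lag_steps = multi_tau_auto_corr(num_levels=4, num_bufs=8,
                                          labels=_labels, images=_frames)
    print(_g2.shape, _lag_steps)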
| {
"repo_name": "yugangzhang/chx_backups",
"path": "develop.py",
"copies": "1",
"size": "32745",
"license": "bsd-3-clause",
"hash": -5618336355218712000,
"line_mean": 32.6893004115,
"line_max": 121,
"alpha_frac": 0.5747442358,
"autogenerated": false,
"ratio": 3.36087447398132,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.443561870978132,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from ..data import Component, Data
from .io import extract_data_fits, extract_data_hdf5
from ...utils import file_format
from ..coordinates import coordinates_from_header
from .helpers import set_default_factory, __factories__
__all__ = ['is_casalike', 'gridded_data', 'casalike_cube']
def is_hdf5(filename):
# All hdf5 files begin with the same sequence
with open(filename, 'rb') as infile:
return infile.read(8) == b'\x89HDF\r\n\x1a\n'
def is_fits(filename):
from ...external.astro import fits
try:
with fits.open(filename):
return True
except IOError:
return False
def gridded_data(filename, format='auto', **kwargs):
"""
    Construct an n-dimensional data object from ``filename``. If the
format cannot be determined from the extension, it can be
specified using the ``format`` option. Valid formats are 'fits' and
'hdf5'.
"""
result = Data()
# Try and automatically find the format if not specified
if format == 'auto':
format = file_format(filename)
# Read in the data
if is_fits(filename):
from ...external.astro import fits
arrays = extract_data_fits(filename, **kwargs)
header = fits.getheader(filename)
result.coords = coordinates_from_header(header)
elif is_hdf5(filename):
arrays = extract_data_hdf5(filename, **kwargs)
else:
raise Exception("Unkonwn format: %s" % format)
for component_name in arrays:
comp = Component.autotyped(arrays[component_name])
result.add_component(comp, component_name)
return result
def is_gridded_data(filename, **kwargs):
if is_hdf5(filename):
return True
from ...external.astro import fits
if is_fits(filename):
with fits.open(filename) as hdulist:
for hdu in hdulist:
if not isinstance(hdu, (fits.PrimaryHDU, fits.ImageHDU)):
return False
return True
return False
gridded_data.label = "FITS/HDF5 Image"
gridded_data.identifier = is_gridded_data
__factories__.append(gridded_data)
set_default_factory('fits', gridded_data)
set_default_factory('hd5', gridded_data)
set_default_factory('hdf5', gridded_data)
def casalike_cube(filename, **kwargs):
"""
    This provides special support for 4D CASA-like cubes,
    which have 2 spatial axes, a spectral axis, and a Stokes axis
    in that order.
    Each Stokes cube is split out as a separate component.
"""
from ...external.astro import fits
result = Data()
with fits.open(filename, **kwargs) as hdulist:
array = hdulist[0].data
header = hdulist[0].header
result.coords = coordinates_from_header(header)
for i in range(array.shape[0]):
result.add_component(array[[i]], label='STOKES %i' % i)
return result
def is_casalike(filename, **kwargs):
"""
Check if a file is a CASA like cube,
with (P, P, V, Stokes) layout
"""
from ...external.astro import fits
if not is_fits(filename):
return False
with fits.open(filename) as hdulist:
if len(hdulist) != 1:
return False
if hdulist[0].header['NAXIS'] != 4:
return False
from astropy.wcs import WCS
w = WCS(hdulist[0].header)
ax = [a.get('coordinate_type') for a in w.get_axis_types()]
return ax == ['celestial', 'celestial', 'spectral', 'stokes']
casalike_cube.label = 'CASA PPV Cube'
casalike_cube.identifier = is_casalike
__factories__.append(casalike_cube)
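# --- Small self-contained sketch (added for illustration, not part of glue) ---
# Shows the HDF5 sniffing used by is_gridded_data above: any file that starts
# with the 8-byte HDF5 signature is accepted by is_hdf5. The temp file is a
# throwaway stand-in, not a real dataset.
if __name__ == '__main__':
    import os
    import tempfile
    _handle, _path = tempfile.mkstemp()
    os.write(_handle, b'\x89HDF\r\n\x1a\n' + b'\x00' * 8)
    os.close(_handle)
    print(is_hdf5(_path))   # True: the signature matches
    os.remove(_path)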
| {
"repo_name": "JudoWill/glue",
"path": "glue/core/data_factories/gridded.py",
"copies": "1",
"size": "3623",
"license": "bsd-3-clause",
"hash": -958854145812053200,
"line_mean": 28.2177419355,
"line_max": 73,
"alpha_frac": 0.6455975711,
"autogenerated": false,
"ratio": 3.5978152929493543,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9743412864049354,
"avg_score": 0,
"num_lines": 124
} |
from __future__ import absolute_import, division, print_function
from datashape import DataShape, Record, Fixed, Var, CType, String, JSON
from jinja2 import Template
json_comment_templ = Template("""<font style="font-size:x-small"> # <a href="{{base_url}}?r=data.json">JSON</a></font>
""")
datashape_outer_templ = Template("""
<pre>
type <a href="{{base_url}}?r=datashape">BlazeDataShape</a> = {{ds_html}}
</pre>
""")
def render_datashape_recursive(base_url, ds, indent):
result = ''
if isinstance(ds, DataShape):
for dim in ds[:-1]:
if isinstance(dim, Fixed):
result += ('%d, ' % dim)
elif isinstance(dim, Var):
result += 'var, '
else:
raise TypeError('Cannot render datashape with dimension %r' % dim)
result += render_datashape_recursive(base_url, ds[-1], indent)
elif isinstance(ds, Record):
result += '{' + json_comment_templ.render(base_url=base_url)
for fname, ftype in zip(ds.names, ds.types):
child_url = base_url + '.' + fname
child_result = render_datashape_recursive(child_url,
ftype, indent + ' ')
result += (indent + ' ' +
'<a href="' + child_url + '">' + str(fname) + '</a>'
': ' + child_result + ';')
if isinstance(ftype, Record):
result += '\n'
else:
result += json_comment_templ.render(base_url=child_url)
result += (indent + '}')
elif isinstance(ds, (CType, String, JSON)):
result += str(ds)
else:
raise TypeError('Cannot render datashape %r' % ds)
return result
def render_datashape(base_url, ds):
ds_html = render_datashape_recursive(base_url, ds, '')
return datashape_outer_templ.render(base_url=base_url, ds_html=ds_html)
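# --- Illustrative sketch (added for this listing, not part of the original module) ---
# Renders a tiny, made-up datashape with render_datashape above; the base URL
# is hypothetical and is only used to build the hyperlinks in the HTML fragment.
if __name__ == '__main__':
    import datashape
    _ds = datashape.dshape('3 * {name: string, amount: int32}')
    print(render_datashape('/catalog/accounts', _ds))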
| {
"repo_name": "aaronmartin0303/blaze",
"path": "blaze/io/server/datashape_html.py",
"copies": "8",
"size": "1881",
"license": "bsd-3-clause",
"hash": 6588314757637002000,
"line_mean": 34.4905660377,
"line_max": 118,
"alpha_frac": 0.5608718767,
"autogenerated": false,
"ratio": 3.5093283582089554,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8070200234908955,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from datashape import discover
from datashape.dispatch import dispatch
from ..append import append
from ..convert import convert, ooc_types
from ..resource import resource
from ..chunks import chunks, Chunks
from ..utils import tmpfile
import os
import numpy as np
import tables
from toolz import first
import datashape
import shutil
__all__ = ['PyTables']
@discover.register((tables.Array, tables.Table))
def discover_tables_node(n):
return datashape.from_numpy(n.shape, n.dtype)
@discover.register(tables.Node)
def discover_tables_node(n):
return discover(n._v_children) # subclasses dict
@discover.register(tables.File)
def discover_tables_node(f):
return discover(f.getNode('/'))
@append.register((tables.Array, tables.Table), np.ndarray)
def numpy_to_pytables(t, x, **kwargs):
t.append(x)
return x
@append.register((tables.Array, tables.Table), object)
def append_h5py(dset, x, **kwargs):
return append(dset, convert(chunks(np.ndarray), x, **kwargs), **kwargs)
@convert.register(np.ndarray, tables.Table, cost=3.0)
def pytables_to_numpy(t, **kwargs):
return t[:]
@convert.register(chunks(np.ndarray), tables.Table, cost=3.0)
def pytables_to_numpy_chunks(t, chunksize=2**20, **kwargs):
def load():
for i in range(0, t.shape[0], chunksize):
yield t[i: i + chunksize]
return chunks(np.ndarray)(load)
def dtype_to_pytables(dtype):
""" Convert NumPy dtype to PyTable descriptor
Examples
--------
>>> from tables import Int32Col, StringCol, Time64Col
>>> dt = np.dtype([('name', 'S7'), ('amount', 'i4'), ('time', 'M8[us]')])
>>> dtype_to_pytables(dt) # doctest: +SKIP
{'amount': Int32Col(shape=(), dflt=0, pos=1),
'name': StringCol(itemsize=7, shape=(), dflt='', pos=0),
'time': Time64Col(shape=(), dflt=0.0, pos=2)}
"""
d = {}
for pos, name in enumerate(dtype.names):
dt, _ = dtype.fields[name]
        if issubclass(dt.type, np.datetime64):
            # wrap in a 1-tuple so that `first` below works for both branches
            tdtype = (tables.Description({name: tables.Time64Col(pos=pos)}),)
        else:
            # descr_from_dtype returns a (Description, byteorder) tuple
            tdtype = tables.descr_from_dtype(np.dtype([(name, dt)]))
        el = first(tdtype)
        getattr(el, name)._v_pos = pos
d.update(el._v_colobjects)
return d
def PyTables(path, datapath, dshape=None, **kwargs):
"""Create or open a ``tables.Table`` object.
Parameters
----------
path : str
Path to a PyTables HDF5 file.
datapath : str
The name of the node in the ``tables.File``.
dshape : str or datashape.DataShape
DataShape to use to create the ``Table``.
Returns
-------
t : tables.Table
Examples
--------
>>> from into.utils import tmpfile
>>> # create from scratch
>>> with tmpfile('.h5') as f:
    ... t = PyTables(f, '/bar',
... dshape='var * {volume: float64, planet: string[10, "A"]}')
... data = [(100.3, 'mars'), (100.42, 'jupyter')]
... t.append(data)
... t[:] # doctest: +SKIP
...
array([(100.3, b'mars'), (100.42, b'jupyter')],
dtype=[('volume', '<f8'), ('planet', 'S10')])
"""
def possibly_create_table(filename, dtype):
f = tables.open_file(filename, mode='a')
try:
if datapath not in f:
if dtype is None:
raise ValueError('dshape cannot be None and datapath not'
' in file')
else:
f.create_table('/', datapath.lstrip('/'), description=dtype)
finally:
f.close()
if dshape:
if isinstance(dshape, str):
dshape = datashape.dshape(dshape)
if dshape[0] == datashape.var:
dshape = dshape.subshape[0]
dtype = dtype_to_pytables(datashape.to_numpy_dtype(dshape))
else:
dtype = None
if os.path.exists(path):
possibly_create_table(path, dtype)
else:
with tmpfile('.h5') as filename:
possibly_create_table(filename, dtype)
shutil.copyfile(filename, path)
return tables.open_file(path, mode='a').get_node(datapath)
@resource.register('pytables://.+', priority=11)
def resource_pytables(path, datapath, **kwargs):
return PyTables(path, datapath, **kwargs)
@dispatch((tables.Table, tables.Array))
def drop(t):
t.remove()
@dispatch(tables.File)
def drop(f):
f.close()
os.remove(f.filename)
ooc_types |= set((tables.Table, tables.Array))
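# --- Hypothetical end-to-end sketch (added for this listing, not part of the backend) ---
# Creates a throwaway table via PyTables(), appends two rows and reads them
# back with the numpy converter registered above; the datapath, column names
# and values are invented for the demo.
if __name__ == '__main__':
    with tmpfile('.h5') as _fn:
        _t = PyTables(_fn, '/demo',
                      dshape='var * {volume: float64, planet: string[10, "A"]}')
        _t.append([(100.3, b'mars'), (100.42, b'jupiter')])
        print(pytables_to_numpy(_t))
        _t._v_file.close()  # close the underlying tables.File before cleanup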
| {
"repo_name": "mrocklin/into",
"path": "into/backends/pytables.py",
"copies": "1",
"size": "4565",
"license": "bsd-3-clause",
"hash": -6277485968781287000,
"line_mean": 26.6666666667,
"line_max": 83,
"alpha_frac": 0.6,
"autogenerated": false,
"ratio": 3.450491307634165,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9549756445788191,
"avg_score": 0.00014697236919459142,
"num_lines": 165
} |
from __future__ import absolute_import, division, print_function
from datashape import discover
from datashape import (float32, float64, string, Option, Record, object_,
datetime_)
import datashape
import pandas as pd
possibly_missing = set((string, datetime_, float32, float64))
@discover.register(pd.DataFrame)
def discover_dataframe(df):
obj = object_
names = list(df.columns)
dtypes = list(map(datashape.CType.from_numpy_dtype, df.dtypes))
dtypes = [string if dt == obj else dt for dt in dtypes]
odtypes = [Option(dt) if dt in possibly_missing else dt
for dt in dtypes]
schema = datashape.Record(list(zip(names, odtypes)))
return len(df) * schema
@discover.register(pd.Series)
def discover_series(s):
return len(s) * datashape.CType.from_numpy_dtype(s.dtype)
def coerce_datetimes(df):
""" Make object columns into datetimes if possible
    Warning: this operates in place.
Example
-------
>>> df = pd.DataFrame({'dt': ['2014-01-01'], 'name': ['Alice']})
>>> df.dtypes # note that these are strings/object
dt object
name object
dtype: object
>>> df2 = coerce_datetimes(df)
>>> df2
dt name
0 2014-01-01 Alice
>>> df2.dtypes # note that only the datetime-looking-one was transformed
dt datetime64[ns]
name object
dtype: object
"""
df2 = df.select_dtypes(include=['object']).apply(pd.to_datetime)
for c in df2.columns:
df[c] = df2[c]
return df
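# --- Illustrative sketch (added for this listing, not part of odo) ---
# discover() on a small frame: object columns map to ?string and float columns
# to ?float64 because of the `possibly_missing` set above; the exact rendering
# depends on the installed datashape/pandas versions.
if __name__ == '__main__':
    _df = pd.DataFrame({'name': ['Alice', 'Bob'], 'amount': [100.5, 200.75]})
    print(discover(_df))   # e.g. 2 * {amount: ?float64, name: ?string}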
| {
"repo_name": "alexmojaki/odo",
"path": "odo/backends/pandas.py",
"copies": "3",
"size": "1540",
"license": "bsd-3-clause",
"hash": 4489270518052764000,
"line_mean": 26.0175438596,
"line_max": 77,
"alpha_frac": 0.638961039,
"autogenerated": false,
"ratio": 3.564814814814815,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.001970989016776861,
"num_lines": 57
} |
from __future__ import absolute_import, division, print_function
from datashape import discover
import datashape
import pandas as pd
@discover.register(pd.DataFrame)
def discover_dataframe(df):
obj = datashape.coretypes.object_
names = list(df.columns)
dtypes = list(map(datashape.CType.from_numpy_dtype, df.dtypes))
dtypes = [datashape.string if dt == obj else dt for dt in dtypes]
schema = datashape.Record(list(zip(names, dtypes)))
return len(df) * schema
@discover.register(pd.Series)
def discover_series(s):
return len(s) * datashape.CType.from_numpy_dtype(s.dtype)
def coerce_datetimes(df):
""" Make object columns into datetimes if possible
    Warning: this operates in place.
Example
-------
>>> df = pd.DataFrame({'dt': ['2014-01-01'], 'name': ['Alice']})
>>> df.dtypes # note that these are strings/object
dt object
name object
dtype: object
>>> df2 = coerce_datetimes(df)
>>> df2
dt name
0 2014-01-01 Alice
>>> df2.dtypes # note that only the datetime-looking-one was transformed
dt datetime64[ns]
name object
dtype: object
"""
df2 = df.select_dtypes(include=['object']).apply(pd.to_datetime)
for c in df2.columns:
df[c] = df2[c]
return df
| {
"repo_name": "mrocklin/into",
"path": "into/backends/pandas.py",
"copies": "1",
"size": "1319",
"license": "bsd-3-clause",
"hash": -7222090020611753000,
"line_mean": 24.862745098,
"line_max": 77,
"alpha_frac": 0.639878696,
"autogenerated": false,
"ratio": 3.4986737400530505,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9638552436053051,
"avg_score": 0,
"num_lines": 51
} |
from __future__ import absolute_import, division, print_function
from datashape import dshape
from blaze.expr import *
from blaze.expr.core import subs
def test_subs():
from blaze.expr import TableSymbol
t = TableSymbol('t', '{name: string, amount: int, id: int}')
expr = t['amount'] + 3
assert expr._subs({3: 4, 'amount': 'id'}).isidentical(
t['id'] + 4)
t2 = TableSymbol('t', '{name: string, amount: int}')
assert t['amount']._subs({t: t2}).isidentical(t2['amount'])
def test_contains():
from blaze.expr import TableSymbol
t = TableSymbol('t', '{name: string, amount: int, id: int}')
assert t in t['name']
assert t in t['name'].distinct()
assert t['id'] not in t['name']
assert t['id'] in t['id'].sum()
def test_path():
from blaze.expr import TableSymbol, join
t = TableSymbol('t', '{name: string, amount: int, id: int}')
v = TableSymbol('v', '{city: string, id: int}')
expr = t['amount'].sum()
assert list(path(expr, t)) == [t.amount.sum(), t.amount, t]
assert list(path(expr, t.amount)) == [t.amount.sum(), t.amount]
assert list(path(expr, t.amount)) == [t.amount.sum(), t.amount]
expr = join(t, v).amount
assert list(path(expr, t)) == [join(t, v).amount, join(t, v), t]
assert list(path(expr, v)) == [join(t, v).amount, join(t, v), v]
def test_hash():
e = symbol('e', 'int')
assert '_hash' in e.__slots__
h = hash(e)
assert isinstance(h, int)
assert h == hash(e)
assert hash(Symbol('e', 'int')) == hash(Symbol('e', 'int'))
f = symbol('f', 'int')
assert hash(e) != hash(f)
assert hash(e._subs({'e': 'f'})) != hash(e)
assert hash(e._subs({'e': 'f'})) == hash(f)
"""
def test_subs_on_datashape():
assert subs(dshape('3 * {foo: int}'), {'foo': 'bar'}) == dshape('3 * {bar: int}')
"""
| {
"repo_name": "cpcloud/blaze",
"path": "blaze/expr/tests/test_core.py",
"copies": "2",
"size": "1848",
"license": "bsd-3-clause",
"hash": 1398539418473203700,
"line_mean": 27.875,
"line_max": 85,
"alpha_frac": 0.5741341991,
"autogenerated": false,
"ratio": 2.966292134831461,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45404263339314604,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from datashape import dshape
from blaze.expr import *
def test_subs():
from blaze.expr import TableSymbol
t = TableSymbol('t', '{name: string, amount: int, id: int}')
expr = t['amount'] + 3
assert expr._subs({3: 4, 'amount': 'id'}).isidentical(
t['id'] + 4)
t2 = TableSymbol('t', '{name: string, amount: int}')
assert t['amount']._subs({t: t2}).isidentical(t2['amount'])
def test_contains():
from blaze.expr import TableSymbol
t = TableSymbol('t', '{name: string, amount: int, id: int}')
assert t in t['name']
assert t in t['name'].distinct()
assert t['id'] not in t['name']
assert t['id'] in t['id'].sum()
def test_path():
from blaze.expr import TableSymbol, join
t = TableSymbol('t', '{name: string, amount: int, id: int}')
v = TableSymbol('v', '{city: string, id: int}')
expr = t['amount'].sum()
assert list(path(expr, t)) == [t.amount.sum(), t.amount, t]
assert list(path(expr, t.amount)) == [t.amount.sum(), t.amount]
assert list(path(expr, t.amount)) == [t.amount.sum(), t.amount]
expr = join(t, v).amount
assert list(path(expr, t)) == [join(t, v).amount, join(t, v), t]
assert list(path(expr, v)) == [join(t, v).amount, join(t, v), v]
| {
"repo_name": "vitan/blaze",
"path": "blaze/expr/tests/test_core.py",
"copies": "1",
"size": "1318",
"license": "bsd-3-clause",
"hash": 5657616168192241000,
"line_mean": 31.95,
"line_max": 68,
"alpha_frac": 0.5971168437,
"autogenerated": false,
"ratio": 3.0368663594470044,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9126630261970534,
"avg_score": 0.0014705882352941176,
"num_lines": 40
} |
from __future__ import absolute_import, division, print_function
from datashape import dshape
from dynd import nd
from . import Capabilities
from .data_descriptor import DDesc
def dynd_descriptor_iter(dyndarr):
for el in dyndarr:
yield DyND_DDesc(el)
class DyND_DDesc(DDesc):
"""
A Blaze data descriptor which exposes a DyND array.
"""
def __init__(self, dyndarr):
if not isinstance(dyndarr, nd.array):
raise TypeError('object is not a dynd array, has type %s' %
type(dyndarr))
self._dyndarr = dyndarr
self._dshape = dshape(nd.dshape_of(dyndarr))
def dynd_arr(self):
return self._dyndarr
@property
def dshape(self):
return self._dshape
@property
def capabilities(self):
"""The capabilities for the dynd data descriptor."""
return Capabilities(
# whether dynd arrays can be updated
immutable = self._dyndarr.access_flags == 'immutable',
# dynd arrays are concrete
deferred = False,
            # dynd arrays can be either persistent or in-memory
persistent = False,
# dynd arrays can be appended efficiently
appendable = False,
remote = False,
)
def __array__(self):
return nd.as_numpy(self.dynd_arr())
def __len__(self):
return len(self._dyndarr)
def __getitem__(self, key):
return DyND_DDesc(self._dyndarr[key])
def __setitem__(self, key, value):
# TODO: This is a horrible hack, we need to specify item setting
# via well-defined interfaces, not punt to another system.
self._dyndarr.__setitem__(key, value)
def __iter__(self):
return dynd_descriptor_iter(self._dyndarr)
def getattr(self, name):
return DyND_DDesc(getattr(self._dyndarr, name))
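# --- Hypothetical usage sketch (added for this listing, not part of the module) ---
# Wraps a small dynd array in the descriptor above and inspects its dshape and
# indexing; requires the dynd-python package, which this module already imports.
if __name__ == '__main__':
    _arr = nd.array([[1, 2, 3], [4, 5, 6]])
    _dd = DyND_DDesc(_arr)
    print(_dd.dshape)                     # e.g. dshape("2 * 3 * int32")
    print(len(_dd))                       # 2
    print(nd.as_py(_dd[1].dynd_arr()))    # [4, 5, 6]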
| {
"repo_name": "talumbau/blaze",
"path": "blaze/datadescriptor/dynd_data_descriptor.py",
"copies": "3",
"size": "1907",
"license": "bsd-3-clause",
"hash": -6256454076907523000,
"line_mean": 27.8939393939,
"line_max": 72,
"alpha_frac": 0.5993707394,
"autogenerated": false,
"ratio": 3.8293172690763053,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5928688008476306,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from datashape import dshape
from dynd import nd
from . import Capabilities
from .data_descriptor import IDataDescriptor
def dynd_descriptor_iter(dyndarr):
for el in dyndarr:
yield DyNDDataDescriptor(el)
class DyNDDataDescriptor(IDataDescriptor):
"""
A Blaze data descriptor which exposes a DyND array.
"""
def __init__(self, dyndarr):
if not isinstance(dyndarr, nd.array):
raise TypeError('object is not a dynd array, has type %s' %
type(dyndarr))
self._dyndarr = dyndarr
self._dshape = dshape(nd.dshape_of(dyndarr))
def dynd_arr(self):
return self._dyndarr
@property
def dshape(self):
return self._dshape
@property
def capabilities(self):
"""The capabilities for the dynd data descriptor."""
return Capabilities(
# whether dynd arrays can be updated
immutable = self._dyndarr.access_flags == 'immutable',
# dynd arrays are concrete
deferred = False,
            # dynd arrays can be either persistent or in-memory
persistent = False,
# dynd arrays can be appended efficiently
appendable = False,
remote = False,
)
def __array__(self):
return nd.as_numpy(self.dynd_arr())
def __len__(self):
return len(self._dyndarr)
def __getitem__(self, key):
return DyNDDataDescriptor(self._dyndarr[key])
def __setitem__(self, key, value):
# TODO: This is a horrible hack, we need to specify item setting
# via well-defined interfaces, not punt to another system.
self._dyndarr.__setitem__(key, value)
def __iter__(self):
return dynd_descriptor_iter(self._dyndarr)
def getattr(self, name):
return DyNDDataDescriptor(getattr(self._dyndarr, name))
| {
"repo_name": "markflorisson/blaze-core",
"path": "blaze/datadescriptor/dynd_data_descriptor.py",
"copies": "10",
"size": "1959",
"license": "bsd-3-clause",
"hash": -658561450505429600,
"line_mean": 28.6818181818,
"line_max": 72,
"alpha_frac": 0.6120469627,
"autogenerated": false,
"ratio": 3.9495967741935485,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9561643736893549,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from datashape import dshape, Record
from toolz import pluck, get, curry, keyfilter
from contextlib import contextmanager
from multiprocessing.pool import ThreadPool
import inspect
import datetime
import tempfile
import os
import shutil
import numpy as np
from .compatibility import unicode
def iter_except(func, exception, first=None):
"""Call a `func` repeatedly until `exception` is raised. Optionally call
`first` first.
Parameters
----------
func : callable
Repeatedly call this until `exception` is raised.
exception : Exception
Stop calling `func` when this is raised.
first : callable, optional, default ``None``
Call this first if it isn't ``None``.
Examples
--------
>>> x = {'a': 1, 'b': 2}
>>> def iterate():
... yield 'a'
... yield 'b'
... yield 'c'
...
>>> keys = iterate()
>>> diter = iter_except(lambda: x[next(keys)], KeyError)
>>> list(diter)
[1, 2]
Notes
-----
* Taken from https://docs.python.org/2/library/itertools.html#recipes
"""
try:
if first is not None:
yield first()
while 1: # True isn't a reserved word in Python 2.x
yield func()
except exception:
pass
def ext(filename):
_, e = os.path.splitext(filename)
return e.lstrip(os.extsep)
def raises(err, lamda):
try:
lamda()
return False
except err:
return True
def expand_tuples(L):
"""
>>> expand_tuples([1, (2, 3)])
[(1, 2), (1, 3)]
>>> expand_tuples([1, 2])
[(1, 2)]
"""
if not L:
return [()]
elif not isinstance(L[0], tuple):
rest = expand_tuples(L[1:])
return [(L[0],) + t for t in rest]
else:
rest = expand_tuples(L[1:])
return [(item,) + t for t in rest for item in L[0]]
@contextmanager
def tmpfile(extension=''):
extension = '.' + extension.lstrip('.')
handle, filename = tempfile.mkstemp(extension)
os.close(handle)
os.remove(filename)
yield filename
if os.path.exists(filename):
if os.path.isdir(filename):
shutil.rmtree(filename)
else:
try:
os.remove(filename)
except OSError: # sometimes we can't remove a generated temp file
pass
def keywords(func):
""" Get the argument names of a function
>>> def f(x, y=2):
... pass
>>> keywords(f)
['x', 'y']
"""
if isinstance(func, type):
return keywords(func.__init__)
return inspect.getargspec(func).args
def cls_name(cls):
if 'builtin' in cls.__module__:
return cls.__name__
else:
return cls.__module__.split('.')[0] + '.' + cls.__name__
@contextmanager
def filetext(text, extension='', open=open, mode='w'):
with tmpfile(extension=extension) as filename:
f = open(filename, mode=mode)
try:
f.write(text)
finally:
try:
f.close()
except AttributeError:
pass
yield filename
@contextmanager
def filetexts(d, open=open):
""" Dumps a number of textfiles to disk
d - dict
a mapping from filename to text like {'a.csv': '1,1\n2,2'}
"""
for filename, text in d.items():
f = open(filename, 'wt')
try:
f.write(text)
finally:
try:
f.close()
except AttributeError:
pass
yield list(d)
for filename in d:
if os.path.exists(filename):
os.remove(filename)
def normalize_to_date(dt):
if isinstance(dt, datetime.datetime) and not dt.time():
return dt.date()
else:
return dt
def assert_allclose(lhs, rhs):
for tb in map(zip, lhs, rhs):
for left, right in tb:
if isinstance(left, (np.floating, float)):
# account for nans
assert np.all(np.isclose(left, right, equal_nan=True))
continue
if isinstance(left, datetime.datetime):
left = normalize_to_date(left)
if isinstance(right, datetime.datetime):
right = normalize_to_date(right)
assert left == right
def records_to_tuples(ds, data):
""" Transform records into tuples
Examples
--------
>>> seq = [{'a': 1, 'b': 10}, {'a': 2, 'b': 20}]
>>> list(records_to_tuples('var * {a: int, b: int}', seq))
[(1, 10), (2, 20)]
>>> records_to_tuples('{a: int, b: int}', seq[0]) # single elements
(1, 10)
>>> records_to_tuples('var * int', [1, 2, 3]) # pass through on non-records
[1, 2, 3]
See Also
--------
tuples_to_records
"""
if isinstance(ds, (str, unicode)):
ds = dshape(ds)
if isinstance(ds.measure, Record) and len(ds.shape) == 1:
return pluck(ds.measure.names, data, default=None)
if isinstance(ds.measure, Record) and len(ds.shape) == 0:
return get(ds.measure.names, data)
if not isinstance(ds.measure, Record):
return data
raise NotImplementedError()
def tuples_to_records(ds, data):
""" Transform tuples into records
Examples
--------
>>> seq = [(1, 10), (2, 20)]
>>> list(tuples_to_records('var * {a: int, b: int}', seq)) # doctest: +SKIP
[{'a': 1, 'b': 10}, {'a': 2, 'b': 20}]
>>> tuples_to_records('{a: int, b: int}', seq[0]) # doctest: +SKIP
{'a': 1, 'b': 10}
>>> tuples_to_records('var * int', [1, 2, 3]) # pass through on non-records
[1, 2, 3]
See Also
--------
records_to_tuples
"""
if isinstance(ds, (str, unicode)):
ds = dshape(ds)
if isinstance(ds.measure, Record) and len(ds.shape) == 1:
names = ds.measure.names
return (dict(zip(names, tup)) for tup in data)
if isinstance(ds.measure, Record) and len(ds.shape) == 0:
names = ds.measure.names
return dict(zip(names, data))
if not isinstance(ds.measure, Record):
return data
raise NotImplementedError()
@contextmanager
def ignoring(*exceptions):
try:
yield
except exceptions:
pass
def into_path(*path):
""" Path to file in into directory
>>> into_path('backends', 'tests', 'myfile.csv') # doctest: +SKIP
'/home/user/odo/odo/backends/tests/myfile.csv'
"""
import odo
return os.path.join(os.path.dirname(odo.__file__), *path)
from multipledispatch import Dispatcher
sample = Dispatcher('sample')
@curry
def pmap(f, iterable):
"""Map `f` over `iterable` in parallel using a ``ThreadPool``.
"""
p = ThreadPool()
try:
result = p.map(f, iterable)
finally:
p.terminate()
return result
@curry
def write(triple, writer):
"""Write a file using the input from `gentemp` using `writer` and return
its index and filename.
Parameters
----------
triple : tuple of int, str, str
The first element is the index in the set of chunks of a file, the
second element is the path to write to, the third element is the data
to write.
Returns
-------
i, filename : int, str
File's index and filename. This is used to return the index and
filename after splitting files.
Notes
-----
This could be adapted to write to an already open handle, which would
    allow, e.g., multipart gzip uploads. Currently we open and write a new file
every time.
"""
i, filename, data = triple
with writer(filename, mode='wb') as f:
f.write(data)
return i, filename
def gentemp(it, suffix=None, start=0):
"""Yield an index, a temp file, and data for each element in `it`.
Parameters
----------
it : Iterable
suffix : str or ``None``, optional
Suffix to add to each temporary file's name
start : int, optional
        An integer indicating where to start the numbering of chunks in `it`.
"""
for i, data in enumerate(it, start=start): # aws needs parts to start at 1
with tmpfile('.into') as fn:
yield i, fn, data
@curry
def split(filename, nbytes, suffix=None, writer=open, start=0):
"""Split a file into chunks of size `nbytes` with each filename containing
a suffix specified by `suffix`. The file will be written with the ``write``
method of an instance of `writer`.
Parameters
----------
filename : str
The file to split
nbytes : int
Split `filename` into chunks of this size
suffix : str, optional
writer : callable, optional
Callable object to use to write the chunks of `filename`
"""
with open(filename, mode='rb') as f:
        # the file is opened in binary mode, so the EOF sentinel must be b''
        byte_chunks = iter(curry(f.read, nbytes), b'')
return pmap(write(writer=writer),
gentemp(byte_chunks, suffix=suffix, start=start))
def filter_kwargs(f, kwargs):
"""Return a dict of valid kwargs for `f` from a subset of `kwargs`
Examples
--------
>>> def f(a, b=1, c=2):
... return a + b + c
...
>>> raw_kwargs = dict(a=1, b=3, d=4)
>>> f(**raw_kwargs)
Traceback (most recent call last):
...
TypeError: f() got an unexpected keyword argument 'd'
>>> kwargs = filter_kwargs(f, raw_kwargs)
>>> f(**kwargs)
6
"""
return keyfilter(keywords(f).__contains__, kwargs)
@curry
def copydoc(from_, to):
"""Copies the docstring from one function to another.
    Parameters
----------
from_ : any
The object to copy the docstring from.
to : any
The object to copy the docstring to.
Returns
-------
to : any
``to`` with the docstring from ``from_``
"""
to.__doc__ = from_.__doc__
return to
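# --- Tiny illustration of copydoc (added for this listing) ---
# copydoc is curried, so it can be applied in one call or used as a partial;
# the two throwaway functions below exist only for this demo.
if __name__ == '__main__':
    def _documented():
        """I carry the docstring."""
    def _undocumented():
        pass
    print(copydoc(_documented, _undocumented).__doc__)   # I carry the docstring.
    _decorate = copydoc(_documented)                     # curried partial
    print(_decorate(_undocumented).__doc__)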
| {
"repo_name": "ywang007/odo",
"path": "odo/utils.py",
"copies": "1",
"size": "9843",
"license": "bsd-3-clause",
"hash": -2618550608532728000,
"line_mean": 24.3033419023,
"line_max": 80,
"alpha_frac": 0.5685258559,
"autogenerated": false,
"ratio": 3.803323029366306,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4871848885266306,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from datashape import dshape, Record
from toolz import pluck, get
from contextlib import contextmanager
import inspect
import datetime
import tempfile
import os
import numpy as np
from .compatibility import unicode
def raises(err, lamda):
try:
lamda()
return False
except err:
return True
def expand_tuples(L):
"""
>>> expand_tuples([1, (2, 3)])
[(1, 2), (1, 3)]
>>> expand_tuples([1, 2])
[(1, 2)]
"""
if not L:
return [()]
elif not isinstance(L[0], tuple):
rest = expand_tuples(L[1:])
return [(L[0],) + t for t in rest]
else:
rest = expand_tuples(L[1:])
return [(item,) + t for t in rest for item in L[0]]
@contextmanager
def tmpfile(extension=''):
extension = '.' + extension.lstrip('.')
handle, filename = tempfile.mkstemp(extension)
os.remove(filename)
yield filename
try:
if os.path.exists(filename):
os.remove(filename)
except OSError: # Sometimes Windows can't close files
if os.name == 'nt':
os.close(handle)
try:
os.remove(filename)
except OSError: # finally give up
pass
def keywords(func):
""" Get the argument names of a function
>>> def f(x, y=2):
... pass
>>> keywords(f)
['x', 'y']
"""
if isinstance(func, type):
return keywords(func.__init__)
return inspect.getargspec(func).args
def cls_name(cls):
if 'builtin' in cls.__module__:
return cls.__name__
else:
return cls.__module__.split('.')[0] + '.' + cls.__name__
@contextmanager
def filetext(text, extension='', open=open, mode='wt'):
with tmpfile(extension=extension) as filename:
f = open(filename, mode=mode)
try:
f.write(text)
finally:
try:
f.close()
except AttributeError:
pass
yield filename
@contextmanager
def filetexts(d, open=open):
""" Dumps a number of textfiles to disk
d - dict
a mapping from filename to text like {'a.csv': '1,1\n2,2'}
"""
for filename, text in d.items():
f = open(filename, 'wt')
try:
f.write(text)
finally:
try:
f.close()
except AttributeError:
pass
yield list(d)
for filename in d:
if os.path.exists(filename):
os.remove(filename)
def normalize_to_date(dt):
if isinstance(dt, datetime.datetime) and not dt.time():
return dt.date()
else:
return dt
def assert_allclose(lhs, rhs):
for tb in map(zip, lhs, rhs):
for left, right in tb:
if isinstance(left, (np.floating, float)):
# account for nans
assert np.all(np.isclose(left, right, equal_nan=True))
continue
if isinstance(left, datetime.datetime):
left = normalize_to_date(left)
if isinstance(right, datetime.datetime):
right = normalize_to_date(right)
assert left == right
def records_to_tuples(ds, data):
""" Transform records into tuples
Examples
--------
>>> seq = [{'a': 1, 'b': 10}, {'a': 2, 'b': 20}]
>>> list(records_to_tuples('var * {a: int, b: int}', seq))
[(1, 10), (2, 20)]
>>> records_to_tuples('{a: int, b: int}', seq[0]) # single elements
(1, 10)
>>> records_to_tuples('var * int', [1, 2, 3]) # pass through on non-records
[1, 2, 3]
See Also
--------
tuples_to_records
"""
if isinstance(ds, (str, unicode)):
ds = dshape(ds)
if isinstance(ds.measure, Record) and len(ds.shape) == 1:
return pluck(ds.measure.names, data)
if isinstance(ds.measure, Record) and len(ds.shape) == 0:
return get(ds.measure.names, data)
if not isinstance(ds.measure, Record):
return data
raise NotImplementedError()
def tuples_to_records(ds, data):
""" Transform tuples into records
Examples
--------
>>> seq = [(1, 10), (2, 20)]
>>> list(tuples_to_records('var * {a: int, b: int}', seq)) # doctest: +SKIP
[{'a': 1, 'b': 10}, {'a': 2, 'b': 20}]
>>> tuples_to_records('{a: int, b: int}', seq[0]) # doctest: +SKIP
{'a': 1, 'b': 10}
>>> tuples_to_records('var * int', [1, 2, 3]) # pass through on non-records
[1, 2, 3]
See Also
--------
records_to_tuples
"""
if isinstance(ds, (str, unicode)):
ds = dshape(ds)
if isinstance(ds.measure, Record) and len(ds.shape) == 1:
names = ds.measure.names
return (dict(zip(names, tup)) for tup in data)
if isinstance(ds.measure, Record) and len(ds.shape) == 0:
names = ds.measure.names
return dict(zip(names, data))
if not isinstance(ds.measure, Record):
return data
raise NotImplementedError()
from multipledispatch import Dispatcher
sample = Dispatcher('sample')
| {
"repo_name": "mrocklin/into",
"path": "into/utils.py",
"copies": "1",
"size": "5097",
"license": "bsd-3-clause",
"hash": -1584814941669865700,
"line_mean": 23.6231884058,
"line_max": 80,
"alpha_frac": 0.548950363,
"autogenerated": false,
"ratio": 3.70421511627907,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.475316547927907,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from datashape import *
from datashape.predicates import iscollection
import itertools
from toolz import curry
from .expressions import *
from .expressions import Field, Map
from .arithmetic import maxshape, Arithmetic, UnaryOp
from .math import Math, sin
from .datetime import DateTime
__all__ = ['broadcast', 'Broadcast', 'scalar_symbols']
def broadcast(expr, leaves, scalars=None):
scalars = scalars or scalar_symbols(leaves)
assert len(scalars) == len(leaves)
return Broadcast(tuple(leaves),
tuple(scalars),
expr._subs(dict(zip(leaves, scalars))))
class Broadcast(ElemWise):
""" Fuse scalar expressions over collections
Given elementwise operations on collections, e.g.
>>> a = symbol('a', '100 * int')
>>> t = symbol('t', '100 * {x: int, y: int}')
>>> expr = sin(a) + t.y**2
It may be best to represent this as a scalar expression mapped over a
collection
>>> sa = symbol('a', 'int')
>>> st = symbol('t', '{x: int, y: int}')
>>> sexpr = sin(sa) + st.y**2
>>> expr = Broadcast((a, t), (sa, st), sexpr)
This provides opportunities for optimized computation.
In practice, expressions are often collected into Broadcast expressions
    automatically. This class is mainly intended for internal use.
"""
__slots__ = '_hash', '_children', '_scalars', '_scalar_expr'
@property
def dshape(self):
myshape = maxshape(map(shape, self._children))
return DataShape(*(myshape + (self._scalar_expr.schema,)))
@property
def _inputs(self):
return self._children
@property
def _name(self):
return self._scalar_expr._name
@property
def _full_expr(self):
return self._scalar_expr._subs(dict(zip(self._scalars,
self._children)))
def scalar_symbols(exprs):
"""
Gives a sequence of scalar symbols to mirror these expressions
Examples
--------
>>> x = symbol('x', '5 * 3 * int32')
>>> y = symbol('y', '5 * 3 * int32')
>>> xx, yy = scalar_symbols([x, y])
>>> xx._name, xx.dshape
('x', dshape("int32"))
>>> yy._name, yy.dshape
('y', dshape("int32"))
"""
new_names = ('_%d' % i for i in itertools.count(1))
scalars = []
names = set()
for expr in exprs:
if expr._name and expr._name not in names:
name = expr._name
names.add(name)
else:
name = next(new_names)
s = symbol(name, expr.schema)
scalars.append(s)
return scalars
Broadcastable = (Arithmetic, Math, Map, Field, DateTime, UnaryOp)
WantToBroadcast = (Arithmetic, Math, Map, DateTime, UnaryOp)
def broadcast_collect(expr, Broadcastable=Broadcastable,
WantToBroadcast=WantToBroadcast):
""" Collapse expression down using Broadcast - Tabular cases only
Expressions of type Broadcastables are swallowed into Broadcast
operations
>>> t = symbol('t', 'var * {x: int, y: int, z: int, when: datetime}')
>>> expr = (t.x + 2*t.y).distinct()
>>> broadcast_collect(expr)
distinct(Broadcast(_children=(t,), _scalars=(t,), _scalar_expr=t.x + (2 * t.y)))
>>> from blaze import exp
>>> expr = t.x + 2 * exp(-(t.x - 1.3) ** 2)
>>> broadcast_collect(expr)
Broadcast(_children=(t,), _scalars=(t,), _scalar_expr=t.x + (2 * (exp(-((t.x - 1.3) ** 2)))))
"""
if (isinstance(expr, WantToBroadcast) and
iscollection(expr.dshape)):
leaves = leaves_of_type(Broadcastable, expr)
expr = broadcast(expr, sorted(leaves, key=str))
# Recurse down
children = [broadcast_collect(i, Broadcastable, WantToBroadcast)
for i in expr._inputs]
return expr._subs(dict(zip(expr._inputs, children)))
@curry
def leaves_of_type(types, expr):
""" Leaves of an expression skipping all operations of type ``types``
"""
if not isinstance(expr, types):
return set([expr])
else:
return set.union(*map(leaves_of_type(types), expr._inputs))
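# --- Illustrative sketch (added for this listing, not part of the module) ---
# Fuses an elementwise expression with broadcast_collect and prints both the
# collapsed Broadcast node and the equivalent expression written back on the
# original leaves; the symbol name and schema are made up.
if __name__ == '__main__':
    _t = symbol('t', 'var * {x: int, y: int}')
    _expr = sin(_t.x) + 2 * _t.y
    _b = broadcast_collect(_expr)
    print(_b)             # Broadcast(...) wrapping the fused scalar expression
    print(_b._full_expr)  # the same computation expressed on the leaves again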
| {
"repo_name": "maxalbert/blaze",
"path": "blaze/expr/broadcast.py",
"copies": "2",
"size": "4155",
"license": "bsd-3-clause",
"hash": 5596870354175858000,
"line_mean": 27.8541666667,
"line_max": 97,
"alpha_frac": 0.602166065,
"autogenerated": false,
"ratio": 3.6351706036745406,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5237336668674542,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from datashape import Option, real, int_, bool_, isreal, isnumeric
from .arithmetic import UnaryOp, BinOp, Arithmetic
from .expressions import schema_method_list
from ..compatibility import builtins
# Here follows a large number of unary operators. These were selected by
# taking the intersection of the functions in ``math`` and ``numpy``
__all__ = ['abs', 'sqrt', 'sin', 'sinh', 'cos', 'cosh', 'tan', 'tanh', 'exp',
'expm1', 'log', 'log10', 'log1p', 'acos', 'acosh', 'asin', 'asinh',
'atan', 'atanh', 'radians', 'degrees', 'atan2', 'ceil', 'floor',
'trunc', 'isnan', 'notnull', 'UnaryMath', 'BinaryMath',
'greatest', 'least']
class UnaryMath(UnaryOp):
"""Mathematical unary operator with real valued dshape like sin, or exp
"""
_arguments = '_child',
_dtype = real
class BinaryMath(BinOp):
_dtype = real
def __str__(self):
return '%s(%s, %s)' % (type(self).__name__, self.lhs, self.rhs)
_unary_math_names = (
'abs',
'sqrt',
'sin',
'sinh',
'cos',
'cosh',
'tan',
'tanh',
'exp',
'expm1',
'log',
'log10',
'log1p',
'acos',
'acosh',
'asin',
'asinh',
'atan',
'atanh',
'radians',
'degrees',
)
for name in _unary_math_names:
locals()[name] = type(name, (UnaryMath,), {})
_binary_math_names = (
'atan2',
'copysign',
'fmod',
'hypot',
'ldexp',
)
for name in _binary_math_names:
locals()[name] = type(name, (BinaryMath,), {})
class greatest(Arithmetic):
_arguments = 'lhs', 'rhs'
op = builtins.max
def __str__(self):
return 'greatest(%s, %s)' % (self.lhs, self.rhs)
class least(Arithmetic):
_arguments = 'lhs', 'rhs'
op = builtins.min
def __str__(self):
return 'least(%s, %s)' % (self.lhs, self.rhs)
_unary_integer_math = (
'ceil',
'floor',
'trunc',
)
for name in _unary_integer_math:
locals()[name] = type(name, (UnaryMath,), dict(_dtype=int_))
class isnan(UnaryMath):
_dtype = bool_
class notnull(UnaryOp):
""" Return whether an expression is not null
Examples
--------
>>> from blaze import symbol, compute
>>> s = symbol('s', 'var * int64')
>>> expr = notnull(s)
>>> expr.dshape
dshape("var * bool")
>>> list(compute(expr, [1, 2, None, 3]))
[True, True, False, True]
"""
_dtype = bool_
def truncate(expr, precision):
""" Truncate number to precision
Examples
--------
>>> from blaze import symbol, compute
>>> x = symbol('x', 'real')
>>> compute(x.truncate(10), 123)
120
>>> compute(x.truncate(0.1), 3.1415) # doctest: +SKIP
3.1
"""
return expr // precision * precision
schema_method_list.extend([
(isreal, set([isnan])),
(isnumeric, set([truncate])),
(lambda ds: isinstance(ds, Option) or isinstance(ds.measure, Option),
set([notnull]))
])
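# --- Illustrative sketch (added for this listing, not part of blaze) ---
# The names produced by the type(...) loops above are ordinary expression
# classes: sin is a UnaryMath subclass and atan2 a BinaryMath subclass, and
# both yield real-valued dshapes when applied to symbols.
if __name__ == '__main__':
    from blaze import symbol
    _x = symbol('x', 'var * float64')
    print(isinstance(sin(_x), UnaryMath), isinstance(atan2(_x, _x), BinaryMath))
    print(sin(_x).dshape)   # expected: dshape("var * float64")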
| {
"repo_name": "ContinuumIO/blaze",
"path": "blaze/expr/math.py",
"copies": "3",
"size": "2997",
"license": "bsd-3-clause",
"hash": -1153204084879701500,
"line_mean": 19.958041958,
"line_max": 78,
"alpha_frac": 0.5632298966,
"autogenerated": false,
"ratio": 3.233009708737864,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 143
} |
from __future__ import absolute_import, division, print_function
from datashape import Option, real, int_, bool_, isreal, isnumeric
from .arithmetic import UnaryOp
from .expressions import schema_method_list
# Here follows a large number of unary operators. These were selected by
# taking the intersection of the functions in ``math`` and ``numpy``
__all__ = ['abs', 'sqrt', 'sin', 'sinh', 'cos', 'cosh', 'tan', 'tanh', 'exp',
'expm1', 'log', 'log10', 'log1p', 'acos', 'acosh', 'asin', 'asinh',
'atan', 'atanh', 'radians', 'degrees', 'ceil', 'floor', 'trunc',
'isnan', 'notnull', 'RealMath', 'IntegerMath', 'BooleanMath',
'Math']
class Math(UnaryOp):
pass
class RealMath(Math):
"""Mathematical unary operator with real valued dshape like sin, or exp
"""
_dtype = real
class abs(RealMath): pass
class sqrt(RealMath): pass
class sin(RealMath): pass
class sinh(RealMath): pass
class cos(RealMath): pass
class cosh(RealMath): pass
class tan(RealMath): pass
class tanh(RealMath): pass
class exp(RealMath): pass
class expm1(RealMath): pass
class log(RealMath): pass
class log10(RealMath): pass
class log1p(RealMath): pass
class acos(RealMath): pass
class acosh(RealMath): pass
class asin(RealMath): pass
class asinh(RealMath): pass
class atan(RealMath): pass
class atanh(RealMath): pass
class radians(RealMath): pass
class degrees(RealMath): pass
class IntegerMath(Math):
""" Mathematical unary operator with int valued dshape like ceil, floor """
_dtype = int_
class ceil(IntegerMath): pass
class floor(IntegerMath): pass
class trunc(IntegerMath): pass
class BooleanMath(Math):
""" Mathematical unary operator with bool valued dshape like isnan """
_dtype = bool_
class isnan(BooleanMath): pass
class notnull(BooleanMath):
""" Return whether an expression is not null
Examples
--------
>>> from blaze import symbol, compute
>>> s = symbol('s', 'var * int64')
>>> expr = notnull(s)
>>> expr.dshape
dshape("var * bool")
>>> list(compute(expr, [1, 2, None, 3]))
[True, True, False, True]
"""
pass
def truncate(expr, precision):
""" Truncate number to precision
Examples
--------
>>> from blaze import symbol, compute
>>> x = symbol('x', 'real')
>>> compute(x.truncate(10), 123)
120
>>> compute(x.truncate(0.1), 3.1415) # doctest: +SKIP
3.1
"""
return expr // precision * precision
schema_method_list.extend([
(isreal, set([isnan])),
(isnumeric, set([truncate])),
(lambda ds: isinstance(ds, Option) or isinstance(ds.measure, Option),
set([notnull]))
])
| {
"repo_name": "jdmcbr/blaze",
"path": "blaze/expr/math.py",
"copies": "5",
"size": "2664",
"license": "bsd-3-clause",
"hash": 5275254344811025000,
"line_mean": 23,
"line_max": 79,
"alpha_frac": 0.6509009009,
"autogenerated": false,
"ratio": 3.455252918287938,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6606153819187938,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from datashape import real, int_, bool_
from .arithmetic import UnaryOp
# Here follows a large number of unary operators. These were selected by
# taking the intersection of the functions in ``math`` and ``numpy``
__all__ = ['abs', 'sqrt', 'sin', 'sinh', 'cos', 'cosh', 'tan', 'tanh', 'exp',
'expm1', 'log', 'log10', 'log1p', 'acos', 'acosh', 'asin', 'asinh',
'atan', 'atanh', 'radians', 'degrees', 'ceil', 'floor', 'trunc',
'isnan', 'RealMath', 'IntegerMath', 'BooleanMath', 'Math']
class Math(UnaryOp):
pass
class RealMath(Math):
"""Mathematical unary operator with real valued dshape like sin, or exp
"""
_dtype = real
class abs(RealMath): pass
class sqrt(RealMath): pass
class sin(RealMath): pass
class sinh(RealMath): pass
class cos(RealMath): pass
class cosh(RealMath): pass
class tan(RealMath): pass
class tanh(RealMath): pass
class exp(RealMath): pass
class expm1(RealMath): pass
class log(RealMath): pass
class log10(RealMath): pass
class log1p(RealMath): pass
class acos(RealMath): pass
class acosh(RealMath): pass
class asin(RealMath): pass
class asinh(RealMath): pass
class atan(RealMath): pass
class atanh(RealMath): pass
class radians(RealMath): pass
class degrees(RealMath): pass
class IntegerMath(Math):
""" Mathematical unary operator with int valued dshape like ceil, floor """
_dtype = int_
class ceil(IntegerMath): pass
class floor(IntegerMath): pass
class trunc(IntegerMath): pass
class BooleanMath(Math):
""" Mathematical unary operator with bool valued dshape like isnan """
_dtype = bool_
class isnan(BooleanMath): pass
def truncate(expr, precision):
""" Truncate number to precision
Examples
--------
>>> from blaze import symbol, compute
>>> x = symbol('x', 'real')
>>> compute(x.truncate(10), 123)
120
>>> compute(x.truncate(0.1), 3.1415) # doctest: +SKIP
3.1
"""
return expr // precision * precision
from datashape.predicates import isreal, isnumeric
from .expressions import schema_method_list
schema_method_list.extend([
(isreal, set([isnan])),
(isnumeric, set([truncate]))
])
| {
"repo_name": "dwillmer/blaze",
"path": "blaze/expr/math.py",
"copies": "3",
"size": "2228",
"license": "bsd-3-clause",
"hash": 4372641918249375000,
"line_mean": 22.9569892473,
"line_max": 79,
"alpha_frac": 0.6732495512,
"autogenerated": false,
"ratio": 3.4382716049382718,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.01172813936581254,
"num_lines": 93
} |
from __future__ import absolute_import, division, print_function
from datashape import var
class _slice(object):
""" A hashable slice object
>>> _slice(0, 10, None)
0:10
"""
def __init__(self, start, stop, step):
self.start = start
self.stop = stop
self.step = step
def __hash__(self):
return hash((slice, self.start, self.stop, self.step))
def __str__(self):
s = ''
if self.start is not None:
s = s + str(self.start)
s = s + ':'
if self.stop is not None:
s = s + str(self.stop)
if self.step is not None:
s = s + ':' + str(self.step)
return s
def __eq__(self, other):
return (type(self), self.start, self.stop, self.step) == \
(type(other), other.start, other.stop, other.step)
def as_slice(self):
return slice(self.start, self.stop, self.step)
__repr__ = __str__
class hashable_list(tuple):
def __str__(self):
return str(list(self))
def hashable_index(index):
""" Convert slice-thing into something hashable
>>> hashable_index(1)
1
>>> isinstance(hash(hashable_index((1, slice(10)))), int)
True
"""
if type(index) is tuple: # can't do isinstance due to hashable_list
return tuple(map(hashable_index, index))
elif isinstance(index, list):
return hashable_list(index)
elif isinstance(index, slice):
return _slice(index.start, index.stop, index.step)
return index
def replace_slices(index):
"""
Takes input from Slice expression and returns either a list,
slice object, or tuple.
Examples
    --------
>>> replace_slices([1, 2, 345, 12])
[1, 2, 345, 12]
>>> type(replace_slices(_slice(1, 5, None))) is slice
True
>>> type(replace_slices((2, 5))) is tuple
True
"""
if isinstance(index, hashable_list):
return list(index)
elif isinstance(index, _slice):
return index.as_slice()
elif isinstance(index, tuple):
return tuple(map(replace_slices, index))
return index
def maxvar(L):
"""
>>> maxvar([1, 2, var])
Var()
>>> maxvar([1, 2, 3])
3
"""
if var in L:
return var
else:
return max(L)
def maxshape(shapes):
"""
>>> maxshape([(10, 1), (1, 10), ()])
(10, 10)
>>> maxshape([(4, 5), (5,)])
(4, 5)
"""
shapes = [shape for shape in shapes if shape]
if not shapes:
return ()
ndim = max(map(len, shapes))
shapes = [(1,) * (ndim - len(shape)) + shape for shape in shapes]
for dims in zip(*shapes):
if len(set(dims) - set([1])) >= 2:
raise ValueError("Shapes don't align, %s" % str(dims))
return tuple(map(maxvar, zip(*shapes)))
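# --- Round-trip sketch (added for illustration) ---
# hashable_index turns an indexing expression into something usable as a dict
# key (e.g. for caching) and replace_slices recovers the original index.
if __name__ == '__main__':
    _idx = (slice(0, 10, 2), [1, 2, 3], 5)
    _key = hashable_index(_idx)
    _cache = {_key: 'cached result'}      # works because _key is hashable
    print(replace_slices(_key) == _idx)   # True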
| {
"repo_name": "ContinuumIO/blaze",
"path": "blaze/expr/utils.py",
"copies": "3",
"size": "2812",
"license": "bsd-3-clause",
"hash": -7991022400516487000,
"line_mean": 22.4333333333,
"line_max": 72,
"alpha_frac": 0.5480085349,
"autogenerated": false,
"ratio": 3.5505050505050506,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 120
} |
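A short usage sketch of the round trip above, assuming `hashable_index`, `replace_slices` and `maxshape` are imported from this module (`blaze.expr.utils`); the values come from the doctests:

idx = (0, slice(2, 10), [1, 2, 3])
key = hashable_index(idx)               # ints, _slice and hashable_list inside
assert isinstance(hash(key), int)       # now usable as a dict/cache key
assert replace_slices(key) == (0, slice(2, 10, None), [1, 2, 3])
assert maxshape([(10, 1), (1, 10), ()]) == (10, 10)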
from __future__ import absolute_import, division, print_function
from datashape.predicates import isscalar
from .compute.sql import select
from .data.sql import SQL, dispatch
from .expr import Expr, Projection, Field, UnaryOp, BinOp, Join
from .compatibility import basestring
from .resource import resource
from toolz import first
import sqlalchemy as sa
__all__ = 'SQL',
@dispatch((Field, Projection, Expr, UnaryOp), SQL)
def compute_up(t, ddesc, **kwargs):
return compute_up(t, ddesc.table, **kwargs)
@dispatch((BinOp, Join), SQL, sa.sql.Selectable)
def compute_up(t, lhs, rhs, **kwargs):
return compute_up(t, lhs.table, rhs, **kwargs)
@dispatch((BinOp, Join), sa.sql.Selectable, SQL)
def compute_up(t, lhs, rhs, **kwargs):
return compute_up(t, lhs, rhs.table, **kwargs)
@dispatch((BinOp, Join), SQL, SQL)
def compute_up(t, lhs, rhs, **kwargs):
return compute_up(t, lhs.table, rhs.table, **kwargs)
@dispatch(Expr, sa.sql.ClauseElement, dict)
def post_compute(expr, query, d):
""" Execute SQLAlchemy query against SQLAlchemy engines
If the result of compute is a SQLAlchemy query then it is likely that the
data elements are themselves SQL objects which contain SQLAlchemy engines.
We find these engines and, if they are all the same, run the query against
these engines and return the result.
"""
if not all(isinstance(val, SQL) for val in d.values()):
return query
engines = set([dd.engine for dd in d.values()])
if len(set(map(str, engines))) != 1:
raise NotImplementedError("Expected single SQLAlchemy engine")
engine = first(engines)
with engine.connect() as conn: # Perform query
result = conn.execute(select(query)).fetchall()
if isscalar(expr.dshape):
return result[0][0]
if isscalar(expr.dshape.measure):
return [x[0] for x in result]
return result
@dispatch(SQL)
def drop(s):
s.table.drop(s.engine)
@dispatch(SQL, basestring)
def create_index(s, column, name=None, unique=False):
if name is None:
raise ValueError('SQL indexes must have a name')
sa.Index(name, getattr(s.table.c, column), unique=unique).create(s.engine)
@dispatch(SQL, list)
def create_index(s, columns, name=None, unique=False):
if name is None:
raise ValueError('SQL indexes must have a name')
args = name,
args += tuple(getattr(s.table.c, column) for column in columns)
sa.Index(*args, unique=unique).create(s.engine)
@resource.register('(sqlite|postgresql|mysql|mysql\+pymysql)://.+')
def resource_sql(uri, table_name, *args, **kwargs):
return SQL(uri, table_name, *args, **kwargs)
@resource.register('impala://.+')
def resource_sql(uri, table_name, *args, **kwargs):
try:
import impala.sqlalchemy
except ImportError:
raise ImportError("Please install or update `impyla` library")
return SQL(uri, table_name, *args, **kwargs)
| {
"repo_name": "vitan/blaze",
"path": "blaze/sql.py",
"copies": "1",
"size": "2960",
"license": "bsd-3-clause",
"hash": -4958628582000986000,
"line_mean": 30.1578947368,
"line_max": 78,
"alpha_frac": 0.689527027,
"autogenerated": false,
"ratio": 3.4946871310507674,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4684214158050768,
"avg_score": null,
"num_lines": null
} |
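The dispatch rules above all follow one pattern: unwrap the `SQL` descriptor to its underlying table and re-dispatch. A self-contained sketch of that pattern using `multipledispatch` directly (which blaze's dispatcher builds on); the `Wrapper` class here is hypothetical and only stands in for the SQL descriptor:

from multipledispatch import dispatch

class Wrapper(object):
    def __init__(self, table):
        self.table = table

@dispatch(str, Wrapper)
def compute(op, data):
    # unwrap, then re-dispatch against the underlying object
    return compute(op, data.table)

@dispatch(str, int)
def compute(op, data):
    return '%s(%d)' % (op, data)

assert compute('count', Wrapper(5)) == 'count(5)'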
from __future__ import absolute_import, division, print_function
from datashape.predicates import isscalar
from multipledispatch import MDNotImplementedError
from .expressions import *
from .strings import *
from .arithmetic import *
from .collections import *
from .split_apply_combine import *
from .broadcast import *
from .reductions import *
from ..dispatch import dispatch
def lean_projection(expr):
""" Insert projections to keep dataset as thin as possible
>>> t = symbol('t', 'var * {a: int, b: int, c: int, d: int}')
>>> lean_projection(t.sort('a').b)
t[['a', 'b']].sort('a', ascending=True).b
"""
fields = expr.fields
return _lean(expr, fields=fields)[0]
@dispatch(Symbol)
def _lean(expr, fields=None):
"""
>>> s = symbol('s', '{x: int, y: int}')
>>> _lean(s, ('x',))
(s['x'], ('x',))
>>> _lean(s, ())
(s, ())
>>> s = symbol('s', 'int')
>>> _lean(s, ())
(s, ())
>>> _lean(s, ('s',))
(s, ())
"""
if not fields or set(expr.fields).issubset(fields):
return expr, fields
else:
return expr[sorted(fields)], fields
@dispatch(Projection)
def _lean(expr, fields=None):
child, _ = _lean(expr._child, fields=fields)
return child[sorted(fields, key=expr.fields.index)], fields
@dispatch(Field)
def _lean(expr, fields=None):
fields = set(fields)
fields.add(expr._name)
child, _ = _lean(expr._child, fields=fields)
return child[expr._name], fields
@dispatch(Arithmetic)
def _lean(expr, fields=None):
lhs, right_fields = _lean(expr.lhs, fields=())
rhs, left_fields = _lean(expr.rhs, fields=())
new_fields = set(fields) | set(left_fields) | set(right_fields)
return type(expr)(lhs, rhs), new_fields
@dispatch(object)
def _lean(expr, fields=None):
return expr, fields
@dispatch(Label)
def _lean(expr, fields=None):
child, new_fields = _lean(expr._child, fields=())
return child.label(expr._name), new_fields
@dispatch(ReLabel)
def _lean(expr, fields=None):
labels = dict(expr.labels)
reverse_labels = dict((v, k) for k, v in expr.labels)
child_fields = set(reverse_labels.get(f, f) for f in fields)
child, new_fields = _lean(expr._child, fields=child_fields)
return child.relabel(**dict((k, v) for k, v in expr.labels if k in
child.fields)), new_fields
@dispatch(ElemWise)
def _lean(expr, fields=None):
if isscalar(expr._child.dshape.measure):
child, _ = _lean(expr._child, fields=set(expr._child.fields))
return expr._subs({expr._child: child}), set(expr._child.fields)
else:
raise MDNotImplementedError()
@dispatch(Broadcast)
def _lean(expr, fields=None):
fields = set(fields) | set(expr.active_columns())
child, _ = _lean(expr._child, fields=fields)
return expr._subs({expr._child: child}), fields
@dispatch(Selection)
def _lean(expr, fields=None):
predicate, pred_fields = _lean(expr.predicate, fields=fields)
fields = set(fields) | set(pred_fields)
child, _ = _lean(expr._child, fields=fields)
return expr._subs({expr._child: child}), fields
@dispatch(Like)
def _lean(expr, fields=None):
child, new_fields = _lean(expr._child,
fields=set(fields) | set(expr.patterns.keys()))
return expr._subs({expr._child: child}), new_fields
@dispatch(Sort)
def _lean(expr, fields=None):
key = expr.key
if not isinstance(key, (list, set, tuple)):
key = [key]
new_fields = set(fields) | set(key)
child, _ = _lean(expr._child, fields=new_fields)
return child.sort(key=expr.key, ascending=expr.ascending), new_fields
@dispatch(Head)
def _lean(expr, fields=None):
child, child_fields = _lean(expr._child, fields=fields)
return child.head(expr.n), child_fields
@dispatch(Reduction)
def _lean(expr, fields=None):
child = expr._child
try:
fields = child.active_columns()
except AttributeError:
fields = child.fields
child, child_fields = _lean(child, fields=set(filter(None, fields)))
return expr._subs({expr._child: child}), child_fields
@dispatch(Summary)
def _lean(expr, fields=None):
save = dict()
new_fields = set()
for name, val in zip(expr.names, expr.values):
if name not in fields:
continue
child, child_fields = _lean(val, fields=set())
save[name] = child
new_fields |= set(child_fields)
return summary(**save), new_fields
@dispatch(By)
def _lean(expr, fields=None):
fields = set(fields)
grouper, grouper_fields = _lean(expr.grouper,
fields=fields.intersection(expr.grouper.fields))
apply, apply_fields = _lean(expr.apply,
fields=fields.intersection(expr.apply.fields))
new_fields = set(apply_fields) | set(grouper_fields)
child = common_subexpression(grouper, apply)
if len(child.fields) > len(new_fields):
child, _ = _lean(child, fields=new_fields)
grouper = grouper._subs({expr._child: child})
apply = apply._subs({expr._child: child})
return By(grouper, apply), new_fields
@dispatch(Distinct)
def _lean(expr, fields=None):
child, new_fields = _lean(expr._child, fields=expr.fields)
return expr._subs({expr._child: child}), new_fields
@dispatch(Merge)
def _lean(expr, fields=None):
new_fields = set()
for f in expr.fields:
if f not in fields:
continue
le, nf = _lean(expr[f], fields=set([f]))
new_fields.update(nf)
child, _ = _lean(expr._child, fields=new_fields)
return expr._subs({expr._child: child})[sorted(fields)], new_fields
@dispatch(Expr)
def _lean(expr, fields=None):
""" Lean projection version of expression
Parameters
----------
expr : Expression
An expression to be optimized
fields : Iterable of strings
The fields that will be needed from this expression
Returns
-------
expr : Expression
An expression with Projections inserted to avoid unnecessary fields
fields : Iterable of strings
The fields that this expression requires to execute
"""
raise NotImplementedError()
| {
"repo_name": "mrocklin/blaze",
"path": "blaze/expr/optimize.py",
"copies": "1",
"size": "6207",
"license": "bsd-3-clause",
"hash": 5919590489342532000,
"line_mean": 25.8701298701,
"line_max": 84,
"alpha_frac": 0.6286450781,
"autogenerated": false,
"ratio": 3.4617958728388176,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9589151624019239,
"avg_score": 0.00025786538391580407,
"num_lines": 231
} |
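A hedged usage sketch for `lean_projection`, assuming blaze is installed and the module is importable at the path shown in the metadata; the expected output comes from the doctest above:

from blaze import symbol
from blaze.expr.optimize import lean_projection

t = symbol('t', 'var * {a: int, b: int, c: int, d: int}')
thin = lean_projection(t.sort('a').b)
# Only the fields the expression needs survive ('a' for the sort key,
# 'b' for the result), so backends read two columns instead of four.
print(thin)   # t[['a', 'b']].sort('a', ascending=True).b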
from __future__ import absolute_import, division, print_function
from ..data_slice_widget import SliceWidget
class TestSliceWidget(object):
def test_slice_center(self):
s = SliceWidget(lo=0, hi=10)
assert s.state.slice_center == 5
def test_browse_slice(self):
s = SliceWidget(lo=0, hi=10)
assert s.state.slice_center == 5
s.button_prev.click()
assert s.state.slice_center == 4
s.button_next.click()
s.button_next.click()
assert s.state.slice_center == 6
s.button_first.click()
assert s.state.slice_center == 0
s.button_prev.click()
assert s.state.slice_center == 10
s.button_next.click()
assert s.state.slice_center == 0
s.button_last.click()
assert s.state.slice_center == 10
s.button_next.click()
assert s.state.slice_center == 0
s.button_prev.click()
assert s.state.slice_center == 10
s.button_prev.click()
assert s.state.slice_center == 9
def test_slice_world(self):
s = SliceWidget(lo=0, hi=5, world=[1, 3, 5, 5.5, 8, 12])
# Check switching between world and pixel coordinates
s.state.slice_center = 0
assert s.state.slider_label == '1.0'
s.state.use_world = False
assert s.state.slider_label == '0'
s.state.slice_center = 3
assert s.state.slider_label == '3'
s.state.use_world = True
assert s.state.slider_label == '5.5'
# Round to nearest
s.state.slider_label = '11'
assert s.state.slice_center == 5
assert s.state.slider_label == '12.0'
# Make sure out of bound values work
s.state.slider_label = '20'
assert s.state.slice_center == 5
assert s.state.slider_label == '12.0'
s.state.slider_label = '-10'
assert s.state.slice_center == 0
assert s.state.slider_label == '1.0'
# And disable world and try and set by pixel
s.state.use_world = False
s.state.slider_label = '4'
assert s.state.slice_center == 4
assert s.state.slider_label == '4'
| {
"repo_name": "stscieisenhamer/glue",
"path": "glue/viewers/common/qt/tests/test_data_slice_widget.py",
"copies": "3",
"size": "2161",
"license": "bsd-3-clause",
"hash": 372229072169675650,
"line_mean": 31.7424242424,
"line_max": 64,
"alpha_frac": 0.5821378991,
"autogenerated": false,
"ratio": 3.4576,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 66
} |
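The browse test above exercises wrap-around stepping (prev from the first slice lands on the last, next from the last lands on the first). This is not glue's implementation, just a small sketch of the behaviour those assertions encode:

def step(center, delta, lo=0, hi=10):
    # wrap-around stepping over the inclusive range [lo, hi]
    span = hi - lo + 1
    return lo + (center - lo + delta) % span

assert step(0, -1) == 10   # prev from the first slice wraps to the last
assert step(10, +1) == 0   # next from the last slice wraps to the first
assert step(5, +1) == 6    # ordinary step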
from __future__ import absolute_import, division, print_function
from .data_viewer import DataViewer
from ...external.qt.QtGui import QTableView
from ...external.qt.QtCore import Qt, QAbstractTableModel
import numpy as np
class DataTableModel(QAbstractTableModel):
def __init__(self, data):
super(DataTableModel, self).__init__()
self._data = data
self.show_hidden = False
@property
def columns(self):
if self.show_hidden:
return self._data.components
else:
return self._data.visible_components
def columnCount(self, index=None):
return len(self.columns)
def rowCount(self, index=None):
#Qt bug: Crashes on tables bigger than this
return min(self._data.size, 71582788)
def headerData(self, section, orientation, role):
if role != Qt.DisplayRole:
return None
if orientation == Qt.Horizontal:
return self.columns[section].label
elif orientation == Qt.Vertical:
return str(section)
def data(self, index, role):
if not index.isValid():
return None
if role == Qt.DisplayRole:
c = self.columns[index.column()]
idx = np.unravel_index([index.row()], self._data.shape)
return str(self._data[c, idx][0])
class TableWidget(DataViewer):
def __init__(self, session, parent=None):
super(TableWidget, self).__init__(session, parent)
self.widget = QTableView()
self.setCentralWidget(self.widget)
hdr = self.widget.horizontalHeader()
hdr.setStretchLastSection(True)
hdr.setResizeMode(hdr.Interactive)
hdr = self.widget.verticalHeader()
hdr.setResizeMode(hdr.Interactive)
def __str__(self):
return "Table Widget"
def unregister(self, hub):
pass
def add_data(self, data):
self.set_data(data)
return True
def add_subset(self, subset):
self.set_data(subset.data)
return True
def set_data(self, data):
self.setUpdatesEnabled(False)
model = DataTableModel(data)
self.widget.setModel(model)
self.setUpdatesEnabled(True)
def closeEvent(self, event):
"""
On close, QT seems to scan through the entire model
if the data set is big. To sidestep that,
we swap out with a tiny data set before closing
"""
from ...core import Data
d = Data(x=[0])
self.widget.setModel(DataTableModel(d))
event.accept()
| {
"repo_name": "JudoWill/glue",
"path": "glue/qt/widgets/table_widget.py",
"copies": "1",
"size": "2577",
"license": "bsd-3-clause",
"hash": 5055864405747915000,
"line_mean": 27.0108695652,
"line_max": 67,
"alpha_frac": 0.6131160264,
"autogenerated": false,
"ratio": 4.05188679245283,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.516500281885283,
"avg_score": null,
"num_lines": null
} |
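The only non-obvious part of `DataTableModel.data` is mapping a flat table row back into the data's N-dimensional shape with `np.unravel_index`. A standalone sketch of that lookup (the shape and values here are made up):

import numpy as np

shape = (4, 5)                        # pretend shape of the glue Data object
values = np.arange(20).reshape(shape)
row = 13                              # the table row the view asks for
idx = np.unravel_index([row], shape)  # -> (array([2]), array([3]))
print(values[idx][0])                 # -> 13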
from __future__ import absolute_import, division, print_function
from datetime import date, datetime, time
from decimal import Decimal
from dynd import nd
import sqlalchemy as sql
import datashape
from ..utils import partition_all
from ..py2help import basestring
from .core import DataDescriptor
from .utils import coerce_row_to_dict
# http://docs.sqlalchemy.org/en/latest/core/types.html
types = {'int64': sql.types.BigInteger,
'int32': sql.types.Integer,
'int': sql.types.Integer,
'int16': sql.types.SmallInteger,
'float': sql.types.Float,
'string': sql.types.String, # Probably just use only this
# 'date': sql.types.Date,
# 'time': sql.types.Time,
# 'datetime': sql.types.DateTime,
# bool: sql.types.Boolean,
# ??: sql.types.LargeBinary,
# Decimal: sql.types.Numeric,
# ??: sql.types.PickleType,
# unicode: sql.types.Unicode,
# unicode: sql.types.UnicodeText,
# str: sql.types.Text, # ??
}
def dshape_to_alchemy(dshape):
"""
>>> dshape_to_alchemy('int')
<class 'sqlalchemy.sql.sqltypes.Integer'>
>>> dshape_to_alchemy('string')
<class 'sqlalchemy.sql.sqltypes.String'>
>>> dshape_to_alchemy('{name: string, amount: int}')
[Column('name', String(), table=None), Column('amount', Integer(), table=None)]
"""
dshape = datashape.dshape(dshape)
if str(dshape) in types:
return types[str(dshape)]
try:
return [sql.Column(name, dshape_to_alchemy(typ))
for name, typ in dshape.parameters[0].parameters[0]]
except TypeError:
raise NotImplementedError("Datashape not supported for SQL Schema")
class SQL(DataDescriptor):
"""
A Blaze data descriptor to expose a SQL database.
>>> dd = SQL('sqlite:///:memory:', 'accounts',
... schema='{name: string, amount: int}')
Insert into database
>>> dd.extend([('Alice', 100), ('Bob', 200)])
Select all from table
>>> list(dd)
[(u'Alice', 100), (u'Bob', 200)]
Verify that we're actually touching the database
>>> with dd.engine.connect() as conn:
... print(list(conn.execute('SELECT * FROM accounts')))
[(u'Alice', 100), (u'Bob', 200)]
Parameters
----------
    engine : string or SQLAlchemy engine
        The URI of the database, or a pre-constructed SQLAlchemy engine
table : string
The name of the table
schema : string, list of Columns
The datashape/schema of the database
Possibly a list of SQLAlchemy columns
"""
immutable = False
deferred = False
appendable = True
@property
def remote(self):
return self.engine.dialect.name != 'sqlite'
@property
def persistent(self):
return self.engine.url != 'sqlite:///:memory:'
def __init__(self, engine, tablename, primary_key='', schema=None):
if isinstance(engine, basestring):
engine = sql.create_engine(engine)
self.engine = engine
self.tablename = tablename
if isinstance(schema, (str, datashape.DataShape)):
columns = dshape_to_alchemy(schema)
for column in columns:
if column.name == primary_key:
column.primary_key = True
if schema is None: # Table must exist
if not engine.has_table(tablename):
raise ValueError('Must provide schema. Table %s does not exist'
% tablename)
self._schema = datashape.dshape(schema)
metadata = sql.MetaData()
table = sql.Table(tablename, metadata, *columns)
self.table = table
metadata.create_all(engine)
def __iter__(self):
with self.engine.connect() as conn:
result = conn.execute(sql.sql.select([self.table]))
for item in result:
yield item
@property
def dshape(self):
return datashape.Var() * self.schema
def extend(self, rows):
rows = (coerce_row_to_dict(self.schema, row)
if isinstance(row, (tuple, list)) else row
for row in rows)
with self.engine.connect() as conn:
for chunk in partition_all(1000, rows): # TODO: 1000 is hardcoded
conn.execute(self.table.insert(), chunk)
def chunks(self, blen=1000):
for chunk in partition_all(blen, iter(self)):
dshape = str(len(chunk)) + ' * ' + str(self.schema)
yield nd.array(chunk, dtype=dshape)
| {
"repo_name": "sethkontny/blaze",
"path": "blaze/data/sql.py",
"copies": "1",
"size": "4563",
"license": "bsd-3-clause",
"hash": -1072653869107948300,
"line_mean": 29.8310810811,
"line_max": 83,
"alpha_frac": 0.594126671,
"autogenerated": false,
"ratio": 3.910025706940874,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5004152377940874,
"avg_score": null,
"num_lines": null
} |
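`SQL.extend` batches rows with `toolz.partition_all` before each insert. A standalone sketch of that chunked-insert pattern against an in-memory SQLite database, written in SQLAlchemy 1.x style to match the era of the code above:

import sqlalchemy as sa
from toolz import partition_all

engine = sa.create_engine('sqlite:///:memory:')
metadata = sa.MetaData()
accounts = sa.Table('accounts', metadata,
                    sa.Column('name', sa.String),
                    sa.Column('amount', sa.Integer))
metadata.create_all(engine)

rows = [{'name': 'acct-%d' % i, 'amount': i} for i in range(2500)]
with engine.connect() as conn:
    for chunk in partition_all(1000, rows):   # 1000 mirrors the hardcoded batch
        conn.execute(accounts.insert(), list(chunk))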
from __future__ import absolute_import, division, print_function
from datetime import datetime, date, timedelta
from math import floor
import sys
def identity(x):
return x
def asday(dt):
if isinstance(dt, datetime):
return dt.date()
else:
return dt
def asweek(dt):
if isinstance(dt, datetime):
dt = dt.date()
return dt - timedelta(days=dt.isoweekday() - 1)
def ashour(dt):
return datetime(dt.year, dt.month, dt.day, dt.hour, tzinfo=dt.tzinfo)
def asminute(dt):
return datetime(dt.year, dt.month, dt.day, dt.hour, dt.minute, tzinfo=dt.tzinfo)
def assecond(dt):
return datetime(dt.year, dt.month, dt.day, dt.hour, dt.minute,
dt.second, tzinfo=dt.tzinfo)
def asmillisecond(dt):
return datetime(dt.year, dt.month, dt.day, dt.hour, dt.minute,
dt.second, dt.microsecond // 1000, tzinfo=dt.tzinfo)
if sys.version_info < (2, 7):
def total_seconds(td):
""" Total seconds of a timedelta
For Python 2.6 compatibility
"""
return (td.microseconds + 1e6 * (td.seconds + 24 * 3600 * td.days)) / 1e6
else:
total_seconds = timedelta.total_seconds
unit_map = {'year': 'asyear',
'month': 'asmonth',
'week': 'asweek',
'day': 'asday',
'hour': 'ashour',
'minute': 'asminute',
'second': 'assecond',
'millisecond': 'asmillisecond',
'microsecond': identity}
def truncate_year(dt, measure):
"""
Truncate by years
>>> dt = datetime(2003, 6, 25, 12, 30, 0)
>>> truncate_year(dt, 1)
datetime.date(2003, 1, 1)
>>> truncate_year(dt, 5)
datetime.date(2000, 1, 1)
"""
return date(dt.year // measure * measure, 1, 1)
def truncate_month(dt, measure):
"""
Truncate by months
>>> dt = datetime(2000, 10, 25, 12, 30, 0)
>>> truncate_month(dt, 1)
datetime.date(2000, 10, 1)
>>> truncate_month(dt, 4)
datetime.date(2000, 8, 1)
"""
months = dt.year * 12 + dt.month
months = months // measure * measure
    return date((months - 1) // 12, (months - 1) % 12 + 1, 1)
def truncate_day(dt, measure):
"""
Truncate by days
>>> dt = datetime(2000, 6, 27, 12, 30, 0)
>>> truncate_day(dt, 1)
datetime.date(2000, 6, 27)
>>> truncate_day(dt, 3)
datetime.date(2000, 6, 25)
"""
days = dt.toordinal()
days = days // measure * measure
return date.fromordinal(days)
oneday = timedelta(days=1)
def truncate_week(dt, measure):
"""
Truncate by weeks
>>> dt = datetime(2000, 6, 22, 12, 30, 0)
>>> truncate_week(dt, 1)
datetime.date(2000, 6, 18)
>>> truncate_week(dt, 3)
datetime.date(2000, 6, 4)
Weeks are defined by having isoweekday == 7 (Sunday)
>>> truncate_week(dt, 1).isoweekday()
7
"""
return truncate_day(dt, measure * 7)
epoch = datetime.utcfromtimestamp(0)
def utctotimestamp(dt):
"""
    Convert a UTC datetime to a POSIX timestamp in seconds
>>> dt = datetime(2000, 1, 1)
>>> utctotimestamp(dt)
946684800.0
>>> datetime.utcfromtimestamp(946684800)
datetime.datetime(2000, 1, 1, 0, 0)
"""
return total_seconds(dt - epoch)
def truncate_minute(dt, measure):
"""
Truncate by minute
>>> dt = datetime(2000, 1, 1, 12, 30, 38)
>>> truncate_minute(dt, 1)
datetime.datetime(2000, 1, 1, 12, 30)
>>> truncate_minute(dt, 12)
datetime.datetime(2000, 1, 1, 12, 24)
"""
return asminute(truncate_second(dt, measure * 60))
def truncate_hour(dt, measure):
"""
Truncate by hour
>>> dt = datetime(2000, 1, 1, 12, 30, 38)
>>> truncate_hour(dt, 1)
datetime.datetime(2000, 1, 1, 12, 0)
>>> truncate_hour(dt, 5)
datetime.datetime(2000, 1, 1, 10, 0)
"""
return ashour(truncate_second(dt, measure * 3600))
def truncate_second(dt, measure):
"""
Truncate by second
>>> dt = datetime(2000, 1, 1, 12, 30, 38)
>>> truncate_second(dt, 15)
datetime.datetime(2000, 1, 1, 12, 30, 30)
"""
d = datetime(dt.year, dt.month, dt.day, tzinfo=dt.tzinfo) # local zero for seconds
seconds = total_seconds(dt - d) // measure * measure
return dt.utcfromtimestamp(seconds + utctotimestamp(d))
def truncate_millisecond(dt, measure):
"""
Truncate by millisecond
>>> dt = datetime(2000, 1, 1, 12, 30, 38, 12345)
>>> truncate_millisecond(dt, 5)
datetime.datetime(2000, 1, 1, 12, 30, 38, 10000)
"""
d = datetime(dt.year, dt.month, dt.day, tzinfo=dt.tzinfo) # local zero for seconds
seconds = total_seconds(dt - d) * 1000 // measure * measure / 1000. + 1e-7
return dt.utcfromtimestamp(seconds + utctotimestamp(d))
def truncate_microsecond(dt, measure):
"""
Truncate by microsecond
>>> dt = datetime(2000, 1, 1, 12, 30, 38, 12345)
>>> truncate_microsecond(dt, 100)
datetime.datetime(2000, 1, 1, 12, 30, 38, 12300)
"""
d = datetime(dt.year, dt.month, dt.day, tzinfo=dt.tzinfo) # local zero for seconds
seconds = total_seconds(dt - d) * 1000000 // measure * measure / 1000000
return dt.utcfromtimestamp(seconds + utctotimestamp(d))
truncate_functions = {'year': truncate_year,
'month': truncate_month,
'week': truncate_week,
'day': truncate_day,
'hour': truncate_hour,
'minute': truncate_minute,
'second': truncate_second,
'millisecond': truncate_millisecond,
'microsecond': truncate_microsecond}
def truncate(dt, measure, unit):
""" Truncate datetimes
    Examples
    --------
>>> dt = datetime(2003, 6, 25, 12, 30, 0)
>>> truncate(dt, 1, 'day')
datetime.date(2003, 6, 25)
>>> truncate(dt, 5, 'hours')
datetime.datetime(2003, 6, 25, 10, 0)
>>> truncate(dt, 3, 'months')
datetime.date(2003, 6, 1)
"""
from blaze.expr.datetime import normalize_time_unit
unit = normalize_time_unit(unit)
return truncate_functions[unit](dt, measure)
| {
"repo_name": "mrocklin/blaze",
"path": "blaze/compute/pydatetime.py",
"copies": "1",
"size": "6077",
"license": "bsd-3-clause",
"hash": -1615627861027761000,
"line_mean": 25.4217391304,
"line_max": 86,
"alpha_frac": 0.5859799243,
"autogenerated": false,
"ratio": 3.339010989010989,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9407227948620525,
"avg_score": 0.0035525929380927955,
"num_lines": 230
} |
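A quick usage sketch of the truncation helpers above; expected values are taken from the doctests, and the final call assumes blaze is importable since `truncate` resolves unit names through `blaze.expr.datetime.normalize_time_unit`:

from datetime import datetime

dt = datetime(2003, 6, 25, 12, 30, 0)
print(truncate_year(dt, 5))                                # datetime.date(2000, 1, 1)
print(truncate_month(datetime(2000, 10, 25, 12, 30), 4))   # datetime.date(2000, 8, 1)
print(truncate(dt, 1, 'day'))                              # datetime.date(2003, 6, 25)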
from __future__ import absolute_import, division, print_function
from datetime import datetime, date, timedelta
import sys
def identity(x):
return x
def asday(dt):
if isinstance(dt, datetime):
return dt.date()
else:
return dt
def asweek(dt):
if isinstance(dt, datetime):
dt = dt.date()
return dt - timedelta(days=dt.isoweekday() - 1)
def ashour(dt):
return datetime(dt.year, dt.month, dt.day, dt.hour, tzinfo=dt.tzinfo)
def asminute(dt):
return datetime(dt.year, dt.month, dt.day, dt.hour, dt.minute,
tzinfo=dt.tzinfo)
def assecond(dt):
return datetime(dt.year, dt.month, dt.day, dt.hour, dt.minute,
dt.second, tzinfo=dt.tzinfo)
def asmillisecond(dt):
return datetime(dt.year, dt.month, dt.day, dt.hour, dt.minute,
dt.second, dt.microsecond // 1000, tzinfo=dt.tzinfo)
if sys.version_info < (2, 7):
def total_seconds(td):
""" Total seconds of a timedelta
For Python 2.6 compatibility
"""
return (td.microseconds + 1e6 * (td.seconds + 24 * 3600 * td.days)) / 1e6
else:
total_seconds = timedelta.total_seconds
unit_map = {'year': 'asyear',
'month': 'asmonth',
'week': 'asweek',
'day': 'asday',
'hour': 'ashour',
'minute': 'asminute',
'second': 'assecond',
'millisecond': 'asmillisecond',
'microsecond': identity}
def truncate_year(dt, measure):
"""
Truncate by years
>>> dt = datetime(2003, 6, 25, 12, 30, 0)
>>> truncate_year(dt, 1)
datetime.date(2003, 1, 1)
>>> truncate_year(dt, 5)
datetime.date(2000, 1, 1)
"""
return date(dt.year // measure * measure, 1, 1)
def truncate_month(dt, measure):
"""
Truncate by months
>>> dt = datetime(2000, 10, 25, 12, 30, 0)
>>> truncate_month(dt, 1)
datetime.date(2000, 10, 1)
>>> truncate_month(dt, 4)
datetime.date(2000, 8, 1)
"""
months = dt.year * 12 + dt.month
months = months // measure * measure
return date((months - 1) // 12, (months - 1) % 12 + 1, 1)
def truncate_day(dt, measure):
"""
Truncate by days
>>> dt = datetime(2000, 6, 27, 12, 30, 0)
>>> truncate_day(dt, 1)
datetime.date(2000, 6, 27)
>>> truncate_day(dt, 3)
datetime.date(2000, 6, 25)
"""
days = dt.toordinal()
days = days // measure * measure
return date.fromordinal(days)
oneday = timedelta(days=1)
def truncate_week(dt, measure):
"""
Truncate by weeks
>>> dt = datetime(2000, 6, 22, 12, 30, 0)
>>> truncate_week(dt, 1)
datetime.date(2000, 6, 18)
>>> truncate_week(dt, 3)
datetime.date(2000, 6, 4)
Weeks are defined by having isoweekday == 7 (Sunday)
>>> truncate_week(dt, 1).isoweekday()
7
"""
return truncate_day(dt, measure * 7)
epoch = datetime.utcfromtimestamp(0)
def utctotimestamp(dt):
"""
    Convert a UTC datetime to a POSIX timestamp in seconds
>>> dt = datetime(2000, 1, 1)
>>> utctotimestamp(dt)
946684800.0
>>> datetime.utcfromtimestamp(946684800)
datetime.datetime(2000, 1, 1, 0, 0)
"""
return total_seconds(dt - epoch)
def truncate_minute(dt, measure):
"""
Truncate by minute
>>> dt = datetime(2000, 1, 1, 12, 30, 38)
>>> truncate_minute(dt, 1)
datetime.datetime(2000, 1, 1, 12, 30)
>>> truncate_minute(dt, 12)
datetime.datetime(2000, 1, 1, 12, 24)
"""
return asminute(truncate_second(dt, measure * 60))
def truncate_hour(dt, measure):
"""
Truncate by hour
>>> dt = datetime(2000, 1, 1, 12, 30, 38)
>>> truncate_hour(dt, 1)
datetime.datetime(2000, 1, 1, 12, 0)
>>> truncate_hour(dt, 5)
datetime.datetime(2000, 1, 1, 10, 0)
"""
return ashour(truncate_second(dt, measure * 3600))
def truncate_second(dt, measure):
"""
Truncate by second
>>> dt = datetime(2000, 1, 1, 12, 30, 38)
>>> truncate_second(dt, 15)
datetime.datetime(2000, 1, 1, 12, 30, 30)
"""
d = datetime(
dt.year, dt.month, dt.day, tzinfo=dt.tzinfo) # local zero for seconds
seconds = total_seconds(dt - d) // measure * measure
return dt.utcfromtimestamp(seconds + utctotimestamp(d))
def truncate_millisecond(dt, measure):
"""
Truncate by millisecond
>>> dt = datetime(2000, 1, 1, 12, 30, 38, 12345)
>>> truncate_millisecond(dt, 5)
datetime.datetime(2000, 1, 1, 12, 30, 38, 10000)
"""
d = datetime(
dt.year, dt.month, dt.day, tzinfo=dt.tzinfo) # local zero for seconds
seconds = total_seconds(dt - d) * 1000 // measure * measure / 1000. + 1e-7
return dt.utcfromtimestamp(seconds + utctotimestamp(d))
def truncate_microsecond(dt, measure):
"""
Truncate by microsecond
>>> dt = datetime(2000, 1, 1, 12, 30, 38, 12345)
>>> truncate_microsecond(dt, 100)
datetime.datetime(2000, 1, 1, 12, 30, 38, 12300)
"""
d = datetime(
dt.year, dt.month, dt.day, tzinfo=dt.tzinfo) # local zero for seconds
seconds = total_seconds(dt - d) * 1000000 // measure * measure / 1000000
return dt.utcfromtimestamp(seconds + utctotimestamp(d))
truncate_functions = {'year': truncate_year,
'month': truncate_month,
'week': truncate_week,
'day': truncate_day,
'hour': truncate_hour,
'minute': truncate_minute,
'second': truncate_second,
'millisecond': truncate_millisecond,
'microsecond': truncate_microsecond}
def truncate(dt, measure, unit):
""" Truncate datetimes
Examples
--------
>>> dt = datetime(2003, 6, 25, 12, 30, 0)
>>> truncate(dt, 1, 'day')
datetime.date(2003, 6, 25)
>>> truncate(dt, 5, 'hours')
datetime.datetime(2003, 6, 25, 10, 0)
>>> truncate(dt, 3, 'months')
datetime.date(2003, 6, 1)
"""
from blaze.expr.datetime import normalize_time_unit
unit = normalize_time_unit(unit)
return truncate_functions[unit](dt, measure)
| {
"repo_name": "caseyclements/blaze",
"path": "blaze/compute/pydatetime.py",
"copies": "16",
"size": "6133",
"license": "bsd-3-clause",
"hash": 5553525334950036000,
"line_mean": 24.2386831276,
"line_max": 81,
"alpha_frac": 0.5776944399,
"autogenerated": false,
"ratio": 3.3642347778387274,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from datetime import datetime
from collections import defaultdict
from toolz import merge
import bisect
import numpy as np
import pandas as pd
from .core import new_dd_object, Series
from . import methods
from ..base import tokenize
class _LocIndexer(object):
""" Helper class for the .loc accessor """
def __init__(self, obj):
self.obj = obj
@property
def _name(self):
return self.obj._name
def _make_meta(self, iindexer, cindexer):
"""
get metadata
"""
if cindexer is None:
return self.obj
else:
return self.obj._meta.loc[:, cindexer]
def __getitem__(self, key):
if isinstance(key, tuple):
# multi-dimensional selection
if len(key) > self.obj.ndim:
# raise from pandas
msg = 'Too many indexers'
raise pd.core.indexing.IndexingError(msg)
iindexer = key[0]
cindexer = key[1]
else:
# if self.obj is Series, cindexer is always None
iindexer = key
cindexer = None
return self._loc(iindexer, cindexer)
def _loc(self, iindexer, cindexer):
""" Helper function for the .loc accessor """
if isinstance(iindexer, Series):
return self._loc_series(iindexer, cindexer)
if self.obj.known_divisions:
iindexer = self._maybe_partial_time_string(iindexer)
if isinstance(iindexer, slice):
return self._loc_slice(iindexer, cindexer)
elif isinstance(iindexer, list):
return self._loc_list(iindexer, cindexer)
else:
# element should raise KeyError
return self._loc_element(iindexer, cindexer)
else:
if isinstance(iindexer, list):
                # applying map_partitions to each partition
# results in duplicated NaN rows
msg = 'Cannot index with list against unknown division'
raise KeyError(msg)
elif not isinstance(iindexer, slice):
iindexer = slice(iindexer, iindexer)
meta = self._make_meta(iindexer, cindexer)
return self.obj.map_partitions(methods.try_loc, iindexer, cindexer,
meta=meta)
def _maybe_partial_time_string(self, iindexer):
"""
Convert index-indexer for partial time string slicing
if obj.index is DatetimeIndex / PeriodIndex
"""
iindexer = _maybe_partial_time_string(self.obj._meta_nonempty.index,
iindexer, kind='loc')
return iindexer
def _loc_series(self, iindexer, cindexer):
meta = self._make_meta(iindexer, cindexer)
return self.obj.map_partitions(methods.loc, iindexer, cindexer,
token='loc-series', meta=meta)
def _loc_list(self, iindexer, cindexer):
name = 'loc-%s' % tokenize(iindexer, self.obj)
parts = self._get_partitions(iindexer)
dsk = {}
divisions = []
items = sorted(parts.items())
for i, (div, indexer) in enumerate(items):
dsk[name, i] = (methods.loc, (self._name, div),
indexer, cindexer)
# append minimum value as division
divisions.append(sorted(indexer)[0])
# append maximum value of the last division
divisions.append(sorted(items[-1][1])[-1])
meta = self._make_meta(iindexer, cindexer)
return new_dd_object(merge(self.obj.dask, dsk), name,
meta=meta, divisions=divisions)
def _loc_element(self, iindexer, cindexer):
name = 'loc-%s' % tokenize(iindexer, self.obj)
part = self._get_partitions(iindexer)
if iindexer < self.obj.divisions[0] or iindexer > self.obj.divisions[-1]:
raise KeyError('the label [%s] is not in the index' % str(iindexer))
dsk = {(name, 0): (methods.loc, (self._name, part),
slice(iindexer, iindexer), cindexer)}
meta = self._make_meta(iindexer, cindexer)
return new_dd_object(merge(self.obj.dask, dsk), name,
meta=meta, divisions=[iindexer, iindexer])
def _get_partitions(self, keys):
if isinstance(keys, list):
return _partitions_of_index_values(self.obj.divisions, keys)
else:
# element
return _partition_of_index_value(self.obj.divisions, keys)
def _coerce_loc_index(self, key):
return _coerce_loc_index(self.obj.divisions, key)
def _loc_slice(self, iindexer, cindexer):
name = 'loc-%s' % tokenize(iindexer, cindexer, self)
assert isinstance(iindexer, slice)
assert iindexer.step in (None, 1)
if iindexer.start is not None:
start = self._get_partitions(iindexer.start)
else:
start = 0
if iindexer.stop is not None:
stop = self._get_partitions(iindexer.stop)
else:
stop = self.obj.npartitions - 1
istart = self._coerce_loc_index(iindexer.start)
istop = self._coerce_loc_index(iindexer.stop)
if stop == start:
dsk = {(name, 0): (methods.loc, (self._name, start),
slice(iindexer.start, iindexer.stop), cindexer)}
divisions = [istart, istop]
else:
dsk = {(name, 0): (methods.loc, (self._name, start),
slice(iindexer.start, None), cindexer)}
for i in range(1, stop - start):
if cindexer is None:
dsk[name, i] = (self._name, start + i)
else:
dsk[name, i] = (methods.loc, (self._name, start + i),
slice(None, None), cindexer)
dsk[name, stop - start] = (methods.loc, (self._name, stop),
slice(None, iindexer.stop), cindexer)
if iindexer.start is None:
div_start = self.obj.divisions[0]
else:
div_start = max(istart, self.obj.divisions[start])
if iindexer.stop is None:
div_stop = self.obj.divisions[-1]
else:
div_stop = min(istop, self.obj.divisions[stop + 1])
divisions = ((div_start, ) +
self.obj.divisions[start + 1:stop + 1] +
(div_stop, ))
assert len(divisions) == len(dsk) + 1
meta = self._make_meta(iindexer, cindexer)
return new_dd_object(merge(self.obj.dask, dsk), name,
meta=meta, divisions=divisions)
def _partition_of_index_value(divisions, val):
""" In which partition does this value lie?
>>> _partition_of_index_value([0, 5, 10], 3)
0
>>> _partition_of_index_value([0, 5, 10], 8)
1
>>> _partition_of_index_value([0, 5, 10], 100)
1
>>> _partition_of_index_value([0, 5, 10], 5) # left-inclusive divisions
1
"""
if divisions[0] is None:
msg = "Can not use loc on DataFrame without known divisions"
raise ValueError(msg)
val = _coerce_loc_index(divisions, val)
i = bisect.bisect_right(divisions, val)
return min(len(divisions) - 2, max(0, i - 1))
def _partitions_of_index_values(divisions, values):
""" Return defaultdict of division and values pairs
    Each key is a division number; its value is the list of index values
    that fall within that division.
>>> sorted(_partitions_of_index_values([0, 5, 10], [3]).items())
[(0, [3])]
>>> sorted(_partitions_of_index_values([0, 5, 10], [3, 8, 5]).items())
[(0, [3]), (1, [8, 5])]
"""
if divisions[0] is None:
msg = "Can not use loc on DataFrame without known divisions"
raise ValueError(msg)
results = defaultdict(list)
values = pd.Index(values, dtype=object)
for val in values:
i = bisect.bisect_right(divisions, val)
div = min(len(divisions) - 2, max(0, i - 1))
results[div].append(val)
return results
def _coerce_loc_index(divisions, o):
""" Transform values to be comparable against divisions
This is particularly valuable to use with pandas datetimes
"""
if divisions and isinstance(divisions[0], datetime):
return pd.Timestamp(o)
if divisions and isinstance(divisions[0], np.datetime64):
return np.datetime64(o).astype(divisions[0].dtype)
return o
def _maybe_partial_time_string(index, indexer, kind):
"""
Convert indexer for partial string selection
if data has DatetimeIndex/PeriodIndex
"""
# do not pass dd.Index
assert isinstance(index, pd.Index)
if not isinstance(index, (pd.DatetimeIndex, pd.PeriodIndex)):
return indexer
if isinstance(indexer, slice):
if isinstance(indexer.start, pd.compat.string_types):
start = index._maybe_cast_slice_bound(indexer.start, 'left', kind)
else:
start = indexer.start
if isinstance(indexer.stop, pd.compat.string_types):
stop = index._maybe_cast_slice_bound(indexer.stop, 'right', kind)
else:
stop = indexer.stop
return slice(start, stop)
elif isinstance(indexer, pd.compat.string_types):
start = index._maybe_cast_slice_bound(indexer, 'left', 'loc')
stop = index._maybe_cast_slice_bound(indexer, 'right', 'loc')
return slice(min(start, stop), max(start, stop))
return indexer
| {
"repo_name": "chrisbarber/dask",
"path": "dask/dataframe/indexing.py",
"copies": "1",
"size": "9724",
"license": "bsd-3-clause",
"hash": -5508190505946601000,
"line_mean": 34.36,
"line_max": 81,
"alpha_frac": 0.5700329083,
"autogenerated": false,
"ratio": 3.9099316445516688,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9979518356470513,
"avg_score": 0.00008923927623114614,
"num_lines": 275
} |
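The partition lookup helpers at the bottom are the easiest entry point into this module; a short usage sketch matching their doctests, where `divisions` are the sorted partition boundaries of a dask DataFrame:

divisions = [0, 5, 10]
print(_partition_of_index_value(divisions, 3))     # 0
print(_partition_of_index_value(divisions, 5))     # 1  (divisions are left-inclusive)
print(sorted(_partitions_of_index_values(divisions, [3, 8, 5]).items()))
# [(0, [3]), (1, [8, 5])]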
from __future__ import absolute_import, division, print_function
from datetime import datetime
from decimal import Decimal
import sqlalchemy as sa
import sqlalchemy.orm
from toolz import curry
from datashape.predicates import isrecord
from ..expr import Field
from odo.backends.sql import dshape_to_alchemy
# This was taken from the following StackOverflow post
# http://stackoverflow.com/questions/5631078/sqlalchemy-print-the-actual-query
# answer by bukzor http://stackoverflow.com/users/146821/bukzor
def literalquery(statement, dialect=None):
"""Generate an SQL expression string with bound parameters rendered inline
for the given SQLAlchemy statement.
WARNING: This method of escaping is insecure, incomplete, and for debugging
purposes only. Executing SQL statements with inline-rendered user values is
extremely insecure.
"""
if isinstance(statement, sqlalchemy.orm.Query):
if dialect is None:
dialect = statement.session.get_bind(
statement._mapper_zero_or_none()
).dialect
statement = statement.statement
if dialect is None:
dialect = getattr(statement.bind, 'dialect', None)
if dialect is None:
from sqlalchemy.dialects import mysql
dialect = mysql.dialect()
Compiler = type(statement._compiler(dialect))
class LiteralCompiler(Compiler):
visit_bindparam = Compiler.render_literal_bindparam
def render_literal_value(self, value, type_):
if isinstance(value, (Decimal, long)):
return str(value)
elif isinstance(value, datetime):
return repr(str(value))
else: # fallback
value = super(LiteralCompiler, self).render_literal_value(
value, type_,
)
if isinstance(value, unicode):
return value.encode('UTF-8')
else:
return value
return LiteralCompiler(dialect, statement)
def make_sqlalchemy_table(expr):
return sa.Table(expr._name, sa.MetaData(), *dshape_to_alchemy(expr.dshape))
@curry
def istable(db, t):
return (isinstance(t, Field) and
isrecord(t.dshape.measure) and
t._child.isidentical(db))
| {
"repo_name": "ContinuumIO/blaze",
"path": "blaze/compute/utils.py",
"copies": "3",
"size": "2286",
"license": "bsd-3-clause",
"hash": -7117790029585822000,
"line_mean": 32.6176470588,
"line_max": 79,
"alpha_frac": 0.6614173228,
"autogenerated": false,
"ratio": 4.3961538461538465,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 68
} |
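A hedged sketch of `literalquery` on a trivial SQLAlchemy select. It uses 1.x-style `select([...])` and assumes Python 2, since the fallback paths above rely on the `long` and `unicode` builtins; the rendered SQL text is illustrative only:

import sqlalchemy as sa

accounts = sa.Table('accounts', sa.MetaData(),
                    sa.Column('name', sa.String),
                    sa.Column('amount', sa.Integer))
query = sa.select([accounts.c.name]).where(accounts.c.amount > 100)
print(literalquery(query))
# e.g. SELECT accounts.name FROM accounts WHERE accounts.amount > 100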
from __future__ import absolute_import, division, print_function
from datetime import datetime
from django.db.models import Q
from django.utils import timezone
from rest_framework import serializers
from rest_framework.response import Response
from sentry.app import search
from sentry.api.base import DocSection
from sentry.api.bases.project import ProjectEndpoint, ProjectEventPermission
from sentry.api.serializers import serialize
from sentry.api.serializers.models.group import StreamGroupSerializer
from sentry.constants import (
DEFAULT_SORT_OPTION, STATUS_CHOICES, STATUS_UNRESOLVED
)
from sentry.db.models.query import create_or_update
from sentry.models import (
Activity, EventMapping, Group, GroupBookmark, GroupSeen, GroupStatus, TagKey
)
from sentry.search.utils import parse_query
from sentry.tasks.deletion import delete_group
from sentry.tasks.merge import merge_group
from sentry.utils.cursors import Cursor
from sentry.utils.apidocs import scenario, attach_scenarios
ERR_INVALID_STATS_PERIOD = "Invalid stats_period. Valid choices are '', '24h', and '14d'"
@scenario('BulkUpdateAggregates')
def bulk_update_aggregates_scenario(runner):
project = runner.default_project
group1, group2 = Group.objects.filter(project=project)[:2]
runner.request(
method='PUT',
path='/projects/%s/%s/groups/?id=%s&id=%s' % (
runner.org.slug, project.slug, group1.id, group2.id),
data={'status': 'unresolved', 'isPublic': False}
)
@scenario('BulkRemoveAggregates')
def bulk_remove_aggregates_scenario(runner):
with runner.isolated_project('Amazing Plumbing') as project:
group1, group2 = Group.objects.filter(project=project)[:2]
runner.request(
method='DELETE',
path='/projects/%s/%s/groups/?id=%s&id=%s' % (
runner.org.slug, project.slug, group1.id, group2.id),
)
@scenario('ListProjectAggregates')
def list_project_aggregates_scenario(runner):
project = runner.default_project
runner.request(
method='GET',
path='/projects/%s/%s/groups/?statsPeriod=24h' % (
runner.org.slug, project.slug),
)
class GroupSerializer(serializers.Serializer):
status = serializers.ChoiceField(choices=zip(
STATUS_CHOICES.keys(), STATUS_CHOICES.keys()
))
hasSeen = serializers.BooleanField()
isBookmarked = serializers.BooleanField()
isPublic = serializers.BooleanField()
merge = serializers.BooleanField()
class ProjectGroupIndexEndpoint(ProjectEndpoint):
doc_section = DocSection.EVENTS
permission_classes = (ProjectEventPermission,)
def _parse_date(self, value):
try:
return datetime.utcfromtimestamp(float(value)).replace(
tzinfo=timezone.utc,
)
except ValueError:
return datetime.strptime(value, '%Y-%m-%dT%H:%M:%S.%fZ').replace(
tzinfo=timezone.utc,
)
# bookmarks=0/1
# status=<x>
# <tag>=<value>
# statsPeriod=24h
@attach_scenarios([list_project_aggregates_scenario])
def get(self, request, project):
"""
List a Project's Aggregates
```````````````````````````
Return a list of aggregates bound to a project. All parameters are
supplied as query string parameters.
        A default query of ``is:unresolved`` is applied. To return results
        with other statuses send a new query value (i.e. ``?query=`` for all
results).
The ``statsPeriod`` parameter can be used to select the timeline
stats which should be present. Possible values are: '' (disable),
'24h', '14d'
:qparam string statsPeriod: an optional stat period (can be one of
``"24h"``, ``"14d"``, and ``""``).
        :qparam querystring query: an optional Sentry structured search
                                   query. If not provided an implied
                                   ``"is:unresolved"`` is assumed.
:pparam string organization_slug: the slug of the organization the
groups belong to.
:pparam string project_slug: the slug of the project the groups
belong to.
:auth: required
"""
query_kwargs = {
'project': project,
}
stats_period = request.GET.get('statsPeriod')
if stats_period not in (None, '', '24h', '14d'):
return Response({"detail": ERR_INVALID_STATS_PERIOD}, status=400)
elif stats_period is None:
# default
stats_period = '24h'
elif stats_period == '':
# disable stats
stats_period = None
if request.GET.get('status'):
try:
query_kwargs['status'] = STATUS_CHOICES[request.GET['status']]
except KeyError:
return Response('{"detail": "invalid status"}', status=400)
if request.user.is_authenticated() and request.GET.get('bookmarks'):
query_kwargs['bookmarked_by'] = request.user
if request.user.is_authenticated() and request.GET.get('assigned'):
query_kwargs['assigned_to'] = request.user
sort_by = request.GET.get('sort')
if sort_by is None:
sort_by = DEFAULT_SORT_OPTION
query_kwargs['sort_by'] = sort_by
tags = {}
for tag_key in TagKey.objects.all_keys(project):
if request.GET.get(tag_key):
tags[tag_key] = request.GET[tag_key]
if tags:
query_kwargs['tags'] = tags
# TODO: dates should include timestamps
date_from = request.GET.get('since')
date_to = request.GET.get('until')
date_filter = request.GET.get('date_filter')
limit = request.GET.get('limit')
if limit:
try:
query_kwargs['limit'] = int(limit)
except ValueError:
return Response('{"detail": "invalid limit"}', status=400)
if date_from:
date_from = self._parse_date(date_from)
if date_to:
date_to = self._parse_date(date_to)
query_kwargs['date_from'] = date_from
query_kwargs['date_to'] = date_to
if date_filter:
query_kwargs['date_filter'] = date_filter
# TODO: proper pagination support
cursor = request.GET.get('cursor')
if cursor:
query_kwargs['cursor'] = Cursor.from_string(cursor)
query = request.GET.get('query', 'is:unresolved').strip()
if len(query) == 32:
# check to see if we've got an event ID
try:
matching_event = EventMapping.objects.filter(
project=project,
event_id=query,
).select_related('group')[0]
except IndexError:
pass
else:
return Response(serialize(
[matching_event.group], request.user, StreamGroupSerializer(
stats_period=stats_period
)
))
if query is not None:
query_kwargs.update(parse_query(query, request.user))
cursor_result = search.query(**query_kwargs)
results = list(cursor_result)
# HACK: remove auto resolved entries
if query_kwargs.get('status') == STATUS_UNRESOLVED:
results = [
r for r in results
if not r.is_resolved()
]
response = Response(serialize(
results, request.user, StreamGroupSerializer(
stats_period=stats_period
)
))
response['Link'] = ', '.join([
self.build_cursor_link(request, 'previous', cursor_result.prev),
self.build_cursor_link(request, 'next', cursor_result.next),
])
return response
@attach_scenarios([bulk_update_aggregates_scenario])
def put(self, request, project):
"""
Bulk Mutate a List of Aggregates
````````````````````````````````
Bulk mutate various attributes on aggregates. The list of groups
to modify is given through the `id` query parameter. It is repeated
for each group that should be modified.
- For non-status updates, the `id` query parameter is required.
- For status updates, the `id` query parameter may be omitted
for a batch "update all" query.
- An optional `status` query parameter may be used to restrict
mutations to only events with the given status.
The following attributes can be modified and are supplied as
JSON object in the body:
If any ids are out of scope this operation will succeed without
any data mutation.
:qparam int id: a list of IDs of the groups to be mutated. This
parameter shall be repeated for each group. It
is optional only if a status is mutated in which
case an implicit `update all` is assumed.
:qparam string status: optionally limits the query to groups of the
specified status. Valid values are
``"resolved"``, ``"unresolved"`` and
``"muted"``.
:pparam string organization_slug: the slug of the organization the
groups belong to.
:pparam string project_slug: the slug of the project the groups
belong to.
:param string status: the new status for the groups. Valid values
are ``"resolved"``, ``"unresolved"`` and
``"muted"``.
:param boolean isPublic: sets the group to public or private.
:param boolean merge: allows to merge or unmerge different groups.
:param boolean hasSeen: in case this API call is invoked with a user
context this allows changing of the flag
that indicates if the user has seen the
event.
:param boolean isBookmarked: in case this API call is invoked with a
user context this allows changing of
the bookmark flag.
:auth: required
"""
group_ids = request.GET.getlist('id')
if group_ids:
group_list = Group.objects.filter(project=project, id__in=group_ids)
# filter down group ids to only valid matches
group_ids = [g.id for g in group_list]
if not group_ids:
return Response(status=204)
else:
group_list = None
serializer = GroupSerializer(data=request.DATA, partial=True)
if not serializer.is_valid():
return Response(serializer.errors, status=400)
result = dict(serializer.object)
# validate that we've passed a selector for non-status bulk operations
if not group_ids and result.keys() != ['status']:
return Response('{"detail": "You must specify a list of IDs for this operation"}', status=400)
if group_ids:
filters = [Q(id__in=group_ids)]
else:
filters = [Q(project=project)]
if request.GET.get('status'):
try:
status_filter = STATUS_CHOICES[request.GET['status']]
except KeyError:
return Response('{"detail": "Invalid status"}', status=400)
filters.append(Q(status=status_filter))
if result.get('status') == 'resolved':
now = timezone.now()
happened = Group.objects.filter(*filters).exclude(
status=GroupStatus.RESOLVED,
).update(
status=GroupStatus.RESOLVED,
resolved_at=now,
)
if group_list and happened:
for group in group_list:
group.status = GroupStatus.RESOLVED
group.resolved_at = now
activity = Activity.objects.create(
project=group.project,
group=group,
type=Activity.SET_RESOLVED,
user=request.user,
)
activity.send_notification()
elif result.get('status'):
new_status = STATUS_CHOICES[result['status']]
happened = Group.objects.filter(*filters).exclude(
status=new_status,
).update(
status=new_status,
)
if group_list and happened:
if new_status == GroupStatus.UNRESOLVED:
activity_type = Activity.SET_UNRESOLVED
elif new_status == GroupStatus.MUTED:
activity_type = Activity.SET_MUTED
for group in group_list:
group.status = new_status
activity = Activity.objects.create(
project=group.project,
group=group,
type=activity_type,
user=request.user,
)
activity.send_notification()
if result.get('hasSeen') and project.member_set.filter(user=request.user).exists():
for group in group_list:
instance, created = create_or_update(
GroupSeen,
group=group,
user=request.user,
project=group.project,
values={
'last_seen': timezone.now(),
}
)
elif result.get('hasSeen') is False:
GroupSeen.objects.filter(
group__in=group_ids,
user=request.user,
).delete()
if result.get('isBookmarked'):
for group in group_list:
GroupBookmark.objects.get_or_create(
project=group.project,
group=group,
user=request.user,
)
elif result.get('isBookmarked') is False:
GroupBookmark.objects.filter(
group__in=group_ids,
user=request.user,
).delete()
if result.get('isPublic'):
Group.objects.filter(
id__in=group_ids,
).update(is_public=True)
for group in group_list:
if group.is_public:
continue
group.is_public = True
Activity.objects.create(
project=group.project,
group=group,
type=Activity.SET_PUBLIC,
user=request.user,
)
elif result.get('isPublic') is False:
Group.objects.filter(
id__in=group_ids,
).update(is_public=False)
for group in group_list:
if not group.is_public:
continue
group.is_public = False
Activity.objects.create(
project=group.project,
group=group,
type=Activity.SET_PRIVATE,
user=request.user,
)
# XXX(dcramer): this feels a bit shady like it should be its own
# endpoint
if result.get('merge') and len(group_list) > 1:
primary_group = sorted(group_list, key=lambda x: -x.times_seen)[0]
children = []
for group in group_list:
if group == primary_group:
continue
children.append(group)
group.update(status=GroupStatus.PENDING_MERGE)
merge_group.delay(
from_object_id=group.id,
to_object_id=primary_group.id,
)
result['merge'] = {
'parent': str(primary_group.id),
'children': [str(g.id) for g in children],
}
return Response(result)
@attach_scenarios([bulk_remove_aggregates_scenario])
def delete(self, request, project):
"""
Bulk Remove a List of Aggregates
````````````````````````````````
Permanently remove the given aggregates. The list of groups to
modify is given through the `id` query parameter. It is repeated
for each group that should be removed.
Only queries by 'id' are accepted.
If any ids are out of scope this operation will succeed without
any data mutation.
:qparam int id: a list of IDs of the groups to be removed. This
parameter shall be repeated for each group.
:pparam string organization_slug: the slug of the organization the
groups belong to.
:pparam string project_slug: the slug of the project the groups
belong to.
:auth: required
"""
group_ids = request.GET.getlist('id')
if group_ids:
group_list = Group.objects.filter(project=project, id__in=group_ids)
# filter down group ids to only valid matches
group_ids = [g.id for g in group_list]
else:
# missing any kind of filter
return Response('{"detail": "You must specify a list of IDs for this operation"}', status=400)
if not group_ids:
return Response(status=204)
# TODO(dcramer): set status to pending deletion
for group in group_list:
delete_group.delay(object_id=group.id)
return Response(status=204)
| {
"repo_name": "korealerts1/sentry",
"path": "src/sentry/api/endpoints/project_group_index.py",
"copies": "7",
"size": "18003",
"license": "bsd-3-clause",
"hash": -2326929412703641600,
"line_mean": 36.8214285714,
"line_max": 106,
"alpha_frac": 0.5486863301,
"autogenerated": false,
"ratio": 4.619707467282525,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0001891911368969157,
"num_lines": 476
} |
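A client-side sketch of the bulk mutate endpoint documented in `put` above, using `requests`; the base URL, slugs and token are placeholders, and the repeated `id` query parameters follow the docstring:

import requests

BASE = 'https://sentry.example.com/api/0'          # placeholder host
url = '%s/projects/%s/%s/groups/' % (BASE, 'my-org', 'my-project')
resp = requests.put(
    url,
    params=[('id', 123), ('id', 456)],             # one `id` per group to mutate
    json={'status': 'resolved', 'isBookmarked': True},
    headers={'Authorization': 'Bearer <token>'},   # placeholder auth
)
print(resp.status_code, resp.json())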