from __future__ import absolute_import, division, print_function
import operator
from toolz import first
import numpy as np
import pandas as pd
from datashape import (
DataShape,
DateTime,
Option,
String,
TimeDelta,
coretypes as ct,
datetime_,
discover,
dshape,
optionify,
promote,
timedelta_,
unsigned,
)
from datashape.predicates import isscalar, isboolean, isnumeric, isdatelike
from datashape.typesets import integral
from dateutil.parser import parse as dt_parse
from .core import parenthesize, eval_str
from .expressions import Expr, shape, ElemWise, binop_inputs, binop_name
from .utils import maxshape
from ..dispatch import dispatch
from ..compatibility import _strtypes
__all__ = '''
BinOp
UnaryOp
Arithmetic
Add
Mult
Repeat
Sub
Div
FloorDiv
Pow
Mod
Interp
USub
Relational
Eq
Ne
Ge
Lt
Le
Gt
And
Or
Not
'''.split()
class BinOp(ElemWise):
_arguments = 'lhs', 'rhs'
_input_attributes = 'lhs', 'rhs'
def __str__(self):
lhs = parenthesize(eval_str(self.lhs))
rhs = parenthesize(eval_str(self.rhs))
return '%s %s %s' % (lhs, self.symbol, rhs)
def _dshape(self):
# TODO: better inference. e.g. int + int -> int
return DataShape(*(maxshape([shape(self.lhs), shape(self.rhs)]) +
(self._dtype,)))
_name = property(binop_name)
@property
def _inputs(self):
return tuple(binop_inputs(self))
class UnaryOp(ElemWise):
_arguments = '_child',
def __str__(self):
return '%s(%s)' % (self.symbol, eval_str(self._child))
@property
def symbol(self):
return type(self).__name__
def _dshape(self):
return DataShape(*(shape(self._child) + (self._dtype,)))
@property
def _name(self):
return self._child._name
class Arithmetic(BinOp):
""" Super class for arithmetic operators like add or mul """
@property
def _dtype(self):
# we can't simply use .schema or .datashape because we may have a bare
# integer, for example
lhs, rhs = discover(self.lhs).measure, discover(self.rhs).measure
return promote(lhs, rhs)
class Add(Arithmetic):
symbol = '+'
op = operator.add
@property
def _dtype(self):
lmeasure = discover(self.lhs).measure
lty = getattr(lmeasure, 'ty', lmeasure)
rmeasure = discover(self.rhs).measure
rty = getattr(rmeasure, 'ty', rmeasure)
if lmeasure == datetime_ and rmeasure == datetime_:
raise TypeError('cannot add datetime to datetime')
l_is_datetime = lty == datetime_
if l_is_datetime or rty == datetime_:
if l_is_datetime:
other = rty
else:
other = lty
if isinstance(other, TimeDelta):
return optionify(lmeasure, rmeasure, datetime_)
else:
raise TypeError(
'can only add timedeltas to datetimes',
)
return super(Add, self)._dtype
class Mult(Arithmetic):
symbol = '*'
op = operator.mul
class Repeat(Arithmetic):
# Sequence repeat
symbol = '*'
op = operator.mul
@property
def _dtype(self):
lmeasure = discover(self.lhs).measure
rmeasure = discover(self.rhs).measure
if not (isinstance(getattr(lmeasure, 'ty', lmeasure), String) and
getattr(rmeasure, 'ty', rmeasure) in integral):
raise TypeError(
'can only repeat strings by an integer amount, got: %s * %s' %
(lmeasure, rmeasure),
)
return optionify(lmeasure, rmeasure, lmeasure)
class Sub(Arithmetic):
symbol = '-'
op = operator.sub
@property
def _dtype(self):
lmeasure = discover(self.lhs).measure
lty = getattr(lmeasure, 'ty', lmeasure)
rmeasure = discover(self.rhs).measure
rty = getattr(rmeasure, 'ty', rmeasure)
if lty == datetime_:
if isinstance(rty, DateTime):
return optionify(lmeasure, rmeasure, timedelta_)
if isinstance(rty, TimeDelta):
return optionify(lmeasure, rmeasure, datetime_)
else:
raise TypeError(
'can only subtract timedelta or datetime from datetime',
)
return super(Sub, self)._dtype
class Div(Arithmetic):
symbol = '/'
op = operator.truediv
@property
def _dtype(self):
lhs, rhs = discover(self.lhs).measure, discover(self.rhs).measure
return optionify(lhs, rhs, ct.float64)
class FloorDiv(Arithmetic):
symbol = '//'
op = operator.floordiv
@property
def _dtype(self):
lhs, rhs = discover(self.lhs).measure, discover(self.rhs).measure
is_unsigned = lhs in unsigned and rhs in unsigned
max_width = max(lhs.itemsize, rhs.itemsize)
prefix = 'u' if is_unsigned else ''
measure = getattr(ct, '%sint%d' % (prefix, max_width * 8))
return optionify(lhs, rhs, measure)
class Pow(Arithmetic):
symbol = '**'
op = operator.pow
class Mod(Arithmetic):
symbol = '%'
op = operator.mod
class Interp(Arithmetic):
# String interpolation
symbol = '%'
op = operator.mod
@property
def _dtype(self):
lmeasure = discover(self.lhs).measure
rmeasure = discover(self.rhs).measure
if not (isinstance(getattr(lmeasure, 'ty', lmeasure), String)):
raise TypeError('can only interp strings, got: %s' % lmeasure)
return optionify(lmeasure, rmeasure, lmeasure)
class USub(UnaryOp):
op = operator.neg
symbol = '-'
def __str__(self):
return '-%s' % parenthesize(eval_str(self._child))
@property
def _dtype(self):
# TODO: better inference. -uint -> int
return self._child.schema
@dispatch(ct.Option, object)
def scalar_coerce(ds, val):
return scalar_coerce(ds.ty, val) if val is not None else None
@dispatch((ct.Record, ct.Mono, ct.Option, DataShape), Expr)
def scalar_coerce(ds, val):
return val
@dispatch(ct.Date, _strtypes)
def scalar_coerce(_, val):
if val == '':
raise TypeError('%r is not a valid date' % val)
dt = dt_parse(val)
if any(x > 0 for x in (dt.hour, dt.minute, dt.second, dt.microsecond)):
msg = "Can not coerce %r to type Date, contains time information"
raise TypeError(msg % val)
return dt.date()
@dispatch(ct.DateTime, _strtypes)
def scalar_coerce(_, val):
if val == '':
raise TypeError('%r is not a valid datetime' % val)
return pd.Timestamp(val)
@dispatch(ct.CType, _strtypes)
def scalar_coerce(dt, val):
return np.asscalar(np.asarray(val, dtype=dt.to_numpy_dtype()))
@dispatch(ct.Record, object)
def scalar_coerce(rec, val):
if len(rec.fields) == 1:
return scalar_coerce(first(rec.types), val)
else:
raise TypeError("Trying to coerce complex datashape\n"
"got dshape: %s\n"
"scalar_coerce only intended for scalar values" % rec)
@dispatch(ct.DataShape, object)
def scalar_coerce(ds, val):
return scalar_coerce(ds.measure, val)
@dispatch(object, object)
def scalar_coerce(dtype, val):
return val
@dispatch(_strtypes, object)
def scalar_coerce(ds, val):
return scalar_coerce(dshape(ds), val)
def _neg(self):
return USub(self)
def _mkbin(name, cons, private=True, reflected=True):
prefix = '_' if private else ''
def _bin(self, other):
result = cons(self, scalar_coerce(self.dshape, other))
result.dshape # Check that shapes and dtypes match up
return result
_bin.__name__ = prefix + name
if reflected:
def _rbin(self, other):
result = cons(scalar_coerce(self.dshape, other), self)
result.dshape # Check that shapes and dtypes match up
return result
_rbin.__name__ = prefix + 'r' + name
return _bin, _rbin
return _bin
_add, _radd = _mkbin('add', Add)
_div, _rdiv = _mkbin('div', Div)
_floordiv, _rfloordiv = _mkbin('floordiv', FloorDiv)
_mod, _rmod = _mkbin('mod', Mod)
_mul, _rmul = _mkbin('mul', Mult)
_pow, _rpow = _mkbin('pow', Pow)
repeat = _mkbin('repeat', Repeat, reflected=False, private=False)
_sub, _rsub = _mkbin('sub', Sub)
interp = _mkbin('interp', Interp, reflected=False, private=False)
class _Optional(Arithmetic):
@property
def _dtype(self):
# we can't simply use .schema or .datashape because we may have a bare
# integer, for example
lhs, rhs = discover(self.lhs).measure, discover(self.rhs).measure
if isinstance(lhs, Option) or isinstance(rhs, Option):
return Option(ct.bool_)
return ct.bool_
class Relational(_Optional):
# Leave this to separate relationals from other types of optionals.
pass
class Eq(Relational):
symbol = '=='
op = operator.eq
class Ne(Relational):
symbol = '!='
op = operator.ne
class Ge(Relational):
symbol = '>='
op = operator.ge
class Le(Relational):
symbol = '<='
op = operator.le
class Gt(Relational):
symbol = '>'
op = operator.gt
class Lt(Relational):
symbol = '<'
op = operator.lt
class And(_Optional):
symbol = '&'
op = operator.and_
class Or(_Optional):
symbol = '|'
op = operator.or_
class Not(UnaryOp):
symbol = '~'
op = operator.invert
@property
def _dtype(self):
return self._child.schema
def __str__(self):
return '~%s' % parenthesize(eval_str(self._child))
_and, _rand = _mkbin('and', And)
_eq = _mkbin('eq', Eq, reflected=False)
_ge = _mkbin('ge', Ge, reflected=False)
_gt = _mkbin('gt', Gt, reflected=False)
_le = _mkbin('le', Le, reflected=False)
_lt = _mkbin('lt', Lt, reflected=False)
_ne = _mkbin('ne', Ne, reflected=False)
_or, _ror = _mkbin('or', Or)
def _invert(self):
result = Invert(self)
result.dshape # Check that shapes and dtypes match up
return result
Invert = Not
BitAnd = And
BitOr = Or
from .expressions import schema_method_list
schema_method_list.extend([
(isnumeric,
set([_add, _radd, _mul, _rmul, _div, _rdiv, _floordiv, _rfloordiv, _sub,
_rsub, _pow, _rpow, _mod, _rmod, _neg])),
(isscalar, set([_eq, _ne, _lt, _le, _gt, _ge])),
(isboolean, set([_or, _ror, _and, _rand, _invert])),
(isdatelike, set([_add, _radd, _sub, _rsub])),
])
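# --- Illustrative sketch (editor addition, not part of the original module) ---
# The ``_dtype`` properties above lean on two datashape helpers that are already
# imported at the top of this file: ``promote`` picks the common result measure of
# the two operands, and ``optionify`` wraps a result measure in ``Option`` whenever
# either operand is optional.  The hypothetical function below is a minimal,
# standalone demonstration of that behaviour, assuming only the public datashape
# API used elsewhere in this module.
def _promotion_sketch():
    from datashape import Option, promote, optionify, coretypes as ct

    # int32 + float64 promotes to the wider float64 measure, as Arithmetic._dtype does.
    widened = promote(ct.int32, ct.float64)

    # Division always yields float64; Option-ness of either side propagates,
    # mirroring Div._dtype's ``optionify(lhs, rhs, ct.float64)``.
    maybe_float = optionify(Option(ct.int32), ct.float64, ct.float64)

    return widened, maybe_float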
| {
"repo_name": "ContinuumIO/blaze",
"path": "blaze/expr/arithmetic.py",
"copies": "3",
"size": "10491",
"license": "bsd-3-clause",
"hash": 9065358955550436000,
"line_mean": 22.6283783784,
"line_max": 78,
"alpha_frac": 0.6053760366,
"autogenerated": false,
"ratio": 3.5181086519114686,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0003381646691505846,
"num_lines": 444
} |
from __future__ import absolute_import, division, print_function
import operator
import bisect
import datashape as ds  # assumed import: ``ds.cat_dshapes`` is used below but ``ds`` is never bound in this snippet
from . import DDesc, Capabilities
def cat_descriptor_iter(ddlist):
for i, dd in enumerate(ddlist):
for el in dd:
yield el
class Cat_DDesc(DDesc):
"""
A Blaze data descriptor which concatenates a list
of data descriptors, all of which have the same
dshape after the first dimension.
This presently doesn't support leading dimensions
whose size is unknown (i.e. streaming dimensions).
"""
def __init__(self, ddlist):
if len(ddlist) <= 1:
raise ValueError('Need at least 2 data descriptors to concatenate')
for dd in ddlist:
if not isinstance(dd, DDesc):
raise ValueError('Provided ddlist has an element '
'which is not a data descriptor')
self._ddlist = ddlist
self._dshape = ds.cat_dshapes([dd.dshape for dd in ddlist])
self._ndim = len(self._dshape[:]) - 1
# Create a list of boundary indices
boundary_index = [0]
for dd in ddlist:
dim_size = operator.index(dd.dshape[0])
boundary_index.append(dim_size + boundary_index[-1])
self._boundary_index = boundary_index
@property
def dshape(self):
return self._dshape
@property
def capabilities(self):
"""The capabilities for the cat data descriptor."""
return Capabilities(
immutable = True,
deferred = True,
# persistency is not supported yet
persistent = False,
appendable = False,
remote = False,
)
def __len__(self):
return self._boundary_index[-1]
def __getitem__(self, key):
if not isinstance(key, tuple):
key = (key,)
# Just integer indices (no slices) for now
boundary_index = self._boundary_index
dim_size = boundary_index[-1]
# TODO: Handle a slice in key[0] too!
idx0 = operator.index(key[0])
# Determine which data descriptor in the list to use
if idx0 >= 0:
if idx0 >= dim_size:
raise IndexError(('Index %d is out of range '
'in dimension sized %d') % (idx0, dim_size))
else:
if idx0 < -dim_size:
raise IndexError(('Index %d is out of range '
'in dimension sized %d') % (idx0, dim_size))
idx0 += dim_size
i = bisect.bisect_right(boundary_index, idx0) - 1
# Call the i-th data descriptor to get the result
return self._ddlist[i][(idx0 - boundary_index[i],) + key[1:]]
def __iter__(self):
return cat_descriptor_iter(self._ddlist)
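# --- Illustrative sketch (editor addition, not part of the original module) ---
# ``Cat_DDesc.__getitem__`` above maps a flat index along the leading dimension to
# (descriptor number, local index) using a cumulative boundary list and
# ``bisect.bisect_right``.  The hypothetical function below reproduces just that
# lookup for a list of block lengths, independent of any data descriptor.
def _locate_in_concatenation(lengths, idx0):
    """Return (block, local_index) for flat index ``idx0`` over blocks of the
    given ``lengths`` -- the same arithmetic used in ``__getitem__`` above."""
    boundary_index = [0]
    for n in lengths:
        boundary_index.append(n + boundary_index[-1])
    dim_size = boundary_index[-1]
    if idx0 < 0:                     # support negative indexing, as above
        idx0 += dim_size
    if not 0 <= idx0 < dim_size:
        raise IndexError('Index is out of range in dimension sized %d' % dim_size)
    i = bisect.bisect_right(boundary_index, idx0) - 1
    return i, idx0 - boundary_index[i]

# e.g. _locate_in_concatenation([3, 5, 2], 6) -> (1, 3)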
| {
"repo_name": "sethkontny/blaze",
"path": "blaze/datadescriptor/cat_data_descriptor.py",
"copies": "3",
"size": "2808",
"license": "bsd-3-clause",
"hash": -2543626018772729000,
"line_mean": 32.8313253012,
"line_max": 79,
"alpha_frac": 0.5658831909,
"autogenerated": false,
"ratio": 4.166172106824926,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6232055297724926,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import operator
import bisect
import datashape as ds  # assumed import: ``ds.cat_dshapes`` is used below but ``ds`` is never bound in this snippet
from . import IDataDescriptor, Capabilities
def cat_descriptor_iter(ddlist):
for i, dd in enumerate(ddlist):
for el in dd:
yield el
class CatDataDescriptor(IDataDescriptor):
"""
A Blaze data descriptor which concatenates a list
of data descriptors, all of which have the same
dshape after the first dimension.
This presently doesn't support leading dimensions
whose size is unknown (i.e. streaming dimensions).
"""
def __init__(self, ddlist):
if len(ddlist) <= 1:
raise ValueError('Need at least 2 data descriptors to concatenate')
for dd in ddlist:
if not isinstance(dd, IDataDescriptor):
raise ValueError('Provided ddlist has an element '
'which is not a data descriptor')
self._ddlist = ddlist
self._dshape = ds.cat_dshapes([dd.dshape for dd in ddlist])
self._ndim = len(self._dshape[:]) - 1
# Create a list of boundary indices
boundary_index = [0]
for dd in ddlist:
dim_size = operator.index(dd.dshape[0])
boundary_index.append(dim_size + boundary_index[-1])
self._boundary_index = boundary_index
@property
def dshape(self):
return self._dshape
@property
def capabilities(self):
"""The capabilities for the cat data descriptor."""
return Capabilities(
immutable = True,
deferred = True,
# persistency is not supported yet
persistent = False,
appendable = False,
remote = False,
)
def __len__(self):
return self._boundary_index[-1]
def __getitem__(self, key):
if not isinstance(key, tuple):
key = (key,)
# Just integer indices (no slices) for now
boundary_index = self._boundary_index
dim_size = boundary_index[-1]
# TODO: Handle a slice in key[0] too!
idx0 = operator.index(key[0])
# Determine which data descriptor in the list to use
if idx0 >= 0:
if idx0 >= dim_size:
raise IndexError(('Index %d is out of range '
'in dimension sized %d') % (idx0, dim_size))
else:
if idx0 < -dim_size:
raise IndexError(('Index %d is out of range '
'in dimension sized %d') % (idx0, dim_size))
idx0 += dim_size
i = bisect.bisect_right(boundary_index, idx0) - 1
# Call the i-th data descriptor to get the result
return self._ddlist[i][(idx0 - boundary_index[i],) + key[1:]]
def __iter__(self):
return cat_descriptor_iter(self._ddlist)
| {
"repo_name": "zeeshanali/blaze",
"path": "blaze/datadescriptor/cat_data_descriptor.py",
"copies": "10",
"size": "2846",
"license": "bsd-3-clause",
"hash": 6632273761971945000,
"line_mean": 33.2891566265,
"line_max": 79,
"alpha_frac": 0.5720309206,
"autogenerated": false,
"ratio": 4.210059171597633,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.004496220439303115,
"num_lines": 83
} |
from __future__ import absolute_import, division, print_function
import operator
import logging
import numpy as np
import pandas as pd
from .contracts import contract
from .coordinates import Coordinates
from .visual import VisualAttributes
from .visual import COLORS
from .exceptions import IncompatibleAttribute
from .component_link import (ComponentLink, CoordinateComponentLink,
BinaryComponentLink)
from .subset import Subset, InequalitySubsetState, SubsetState
from .hub import Hub
from .util import split_component_view, row_lookup
from ..utils import unique, shape_to_string, view_shape, coerce_numeric, check_sorted
from .decorators import clear_cache
from .message import (DataUpdateMessage,
DataAddComponentMessage, NumericalDataChangedMessage,
SubsetCreateMessage, ComponentsChangedMessage,
ComponentReplacedMessage)
from ..compat.collections import OrderedDict
from ..external import six
__all__ = ['Data', 'ComponentID', 'Component', 'DerivedComponent',
'CategoricalComponent', 'CoordinateComponent']
# access to ComponentIDs via .item[name]
class ComponentIDDict(object):
def __init__(self, data, **kwargs):
self.data = data
def __getitem__(self, key):
result = self.data.find_component_id(key)
if result is None:
raise KeyError("ComponentID not found or not unique: %s"
% key)
return result
class ComponentID(object):
""" References a :class:`Component` object within a :class:`Data` object.
ComponentIDs behave as keys::
component_id = data.id[name]
data[component_id] -> numpy array
"""
def __init__(self, label, hidden=False):
""":param label: Name for the ID
:type label: str"""
self._label = str(label)
self._hidden = hidden
@property
def label(self):
return self._label
@label.setter
def label(self, value):
"""Change label.
.. warning::
Label changes are not currently tracked by client
classes. Labels should only be changed before creating other
client objects
"""
self._label = str(value)
@property
def hidden(self):
"""Whether to hide the component by default"""
return self._hidden
def __str__(self):
return str(self._label)
def __repr__(self):
return str(self._label)
def __eq__(self, other):
if np.issubsctype(type(other), np.number):
return InequalitySubsetState(self, other, operator.eq)
return other is self
# In Python 3, if __eq__ is defined, then __hash__ has to be re-defined
if six.PY3:
__hash__ = object.__hash__
def __ne__(self, other):
if np.issubsctype(type(other), np.number):
return InequalitySubsetState(self, other, operator.ne)
return other is not self
def __gt__(self, other):
return InequalitySubsetState(self, other, operator.gt)
def __ge__(self, other):
return InequalitySubsetState(self, other, operator.ge)
def __lt__(self, other):
return InequalitySubsetState(self, other, operator.lt)
def __le__(self, other):
return InequalitySubsetState(self, other, operator.le)
def __add__(self, other):
return BinaryComponentLink(self, other, operator.add)
def __radd__(self, other):
return BinaryComponentLink(other, self, operator.add)
def __sub__(self, other):
return BinaryComponentLink(self, other, operator.sub)
def __rsub__(self, other):
return BinaryComponentLink(other, self, operator.sub)
def __mul__(self, other):
return BinaryComponentLink(self, other, operator.mul)
def __rmul__(self, other):
return BinaryComponentLink(other, self, operator.mul)
def __div__(self, other):
return BinaryComponentLink(self, other, operator.div)
def __rdiv__(self, other):
return BinaryComponentLink(other, self, operator.div)
def __truediv__(self, other):
return BinaryComponentLink(self, other, operator.truediv)
def __rtruediv__(self, other):
return BinaryComponentLink(other, self, operator.truediv)
def __pow__(self, other):
return BinaryComponentLink(self, other, operator.pow)
def __rpow__(self, other):
return BinaryComponentLink(other, self, operator.pow)
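# --- Illustrative sketch (editor addition, not part of the original module) ---
# The operator overloads on ComponentID above do not compare or combine values
# directly: comparisons against numbers build InequalitySubsetState objects and
# arithmetic builds BinaryComponentLink objects, both of which are evaluated
# lazily against a Data object later.  A minimal demonstration via a hypothetical
# helper, assuming only the classes defined or imported in this module:
def _component_id_expression_sketch():
    x = ComponentID('x')
    y = ComponentID('y')

    state = x > 3          # an InequalitySubsetState, not a boolean
    link = x + y           # a BinaryComponentLink, not a number

    return isinstance(state, InequalitySubsetState), isinstance(link, BinaryComponentLink)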
class Component(object):
""" Stores the actual, numerical information for a particular quantity
Data objects hold one or more components, accessed via
ComponentIDs. All Components in a data set must have the same
shape and number of dimensions
Notes
-----
Instead of instantiating Components directly, consider using
:meth:`Component.autotyped`, which chooses a subclass most appropriate
for the data type.
"""
def __init__(self, data, units=None):
"""
:param data: The data to store
:type data: :class:`numpy.ndarray`
:param units: Optional unit label
:type units: str
"""
# The physical units of the data
self.units = units
# The actual data
# subclasses may pass non-arrays here as placeholders.
if isinstance(data, np.ndarray):
data = coerce_numeric(data)
data.setflags(write=False) # data is read-only
self._data = data
@property
def units(self):
return self._units
@units.setter
def units(self, value):
self._units = str(value)
@property
def hidden(self):
"""Whether the Component is hidden by default"""
return False
@property
def data(self):
""" The underlying :class:`numpy.ndarray` """
return self._data
@property
def shape(self):
""" Tuple of array dimensions """
return self._data.shape
@property
def ndim(self):
""" The number of dimensions """
return len(self._data.shape)
def __getitem__(self, key):
logging.debug("Using %s to index data of shape %s", key, self.shape)
return self._data[key]
@property
def numeric(self):
"""
Whether or not the datatype is numeric
"""
return np.can_cast(self.data[0], np.complex)
@property
def categorical(self):
"""
Whether or not the datatype is categorical
"""
return False
def __str__(self):
return "Component with shape %s" % shape_to_string(self.shape)
def jitter(self, method=None):
raise NotImplementedError
def to_series(self, **kwargs):
""" Convert into a pandas.Series object.
:param kwargs: All kwargs are passed to the Series constructor.
:return: pandas.Series
"""
return pd.Series(self.data.ravel(), **kwargs)
@classmethod
def autotyped(cls, data, units=None):
"""
Automatically choose between Component and CategoricalComponent,
based on the input data type.
:param data: The data to pack into a Component (array-like)
:param units: Optional units
:type units: str
:returns: A Component (or subclass)
"""
data = np.asarray(data)
if np.issubdtype(data.dtype, np.object_):
return CategoricalComponent(data, units=units)
n = coerce_numeric(data)
thresh = 0.5
try:
use_categorical = np.issubdtype(data.dtype, np.character) and \
np.isfinite(n).mean() <= thresh
except TypeError: # isfinite not supported. non-numeric dtype
use_categorical = True
if use_categorical:
return CategoricalComponent(data, units=units)
else:
return Component(n, units=units)
class DerivedComponent(Component):
""" A component which derives its data from a function """
def __init__(self, data, link, units=None):
"""
:param data: The data object to use for calculation
:type data: :class:`~glue.core.data.Data`
:param link: The link that carries out the function
:type link: :class:`~glue.core.component_link.ComponentLink`
:param units: Optional unit description
"""
super(DerivedComponent, self).__init__(data, units=units)
self._link = link
def set_parent(self, data):
""" Reassign the Data object that this DerivedComponent operates on """
self._data = data
@property
def hidden(self):
return self._link.hidden
@property
def data(self):
""" Return the numerical data as a numpy array """
return self._link.compute(self._data)
@property
def link(self):
""" Return the component link """
return self._link
def __getitem__(self, key):
return self._link.compute(self._data, key)
class CoordinateComponent(Component):
"""
Components associated with pixel or world coordinates
The numerical values are computed on the fly.
"""
def __init__(self, data, axis, world=False):
super(CoordinateComponent, self).__init__(None, None)
self.world = world
self._data = data
self.axis = axis
@property
def data(self):
return self._calculate()
def _calculate(self, view=None):
slices = [slice(0, s, 1) for s in self.shape]
grids = np.broadcast_arrays(*np.ogrid[slices])
if view is not None:
grids = [g[view] for g in grids]
if self.world:
world = self._data.coords.pixel2world(*grids[::-1])[::-1]
return world[self.axis]
else:
return grids[self.axis]
@property
def shape(self):
""" Tuple of array dimensions. """
return self._data.shape
@property
def ndim(self):
""" Number of dimensions """
return len(self._data.shape)
def __getitem__(self, key):
return self._calculate(key)
def __lt__(self, other):
if self.world == other.world:
return self.axis < other.axis
return self.world
def __gluestate__(self, context):
return dict(axis=self.axis, world=self.world)
@classmethod
def __setgluestate__(cls, rec, context):
return cls(None, rec['axis'], rec['world'])
class CategoricalComponent(Component):
"""
Container for categorical data.
"""
def __init__(self, categorical_data, categories=None, jitter=None, units=None):
"""
:param categorical_data: The underlying :class:`numpy.ndarray`
:param categories: List of unique values in the data
:param jitter: Strategy for jittering the data
"""
super(CategoricalComponent, self).__init__(None, units)
self._categorical_data = np.asarray(categorical_data)
if self._categorical_data.ndim > 1:
raise ValueError("Categorical Data must be 1-dimensional")
# Disable changing of categories
self._categorical_data.setflags(write=False)
self._categories = categories
self._jitter_method = jitter
self._is_jittered = False
self._data = None
if self._categories is None:
self._update_categories()
else:
self._update_data()
@property
def categorical(self):
return True
def _update_categories(self, categories=None):
"""
:param categories: A sorted array of categories to find in the dataset.
If None the categories are the unique items in the data.
:return: None
"""
if categories is None:
categories, inv = unique(self._categorical_data)
self._categories = categories
self._data = inv.astype(np.float)
self._data.setflags(write=False)
self.jitter(method=self._jitter_method)
else:
if check_sorted(categories):
self._categories = categories
self._update_data()
else:
raise ValueError("Provided categories must be Sorted")
def _update_data(self):
"""
Converts the categorical data into the numeric representations given
self._categories
"""
self._is_jittered = False
self._data = row_lookup(self._categorical_data, self._categories)
self.jitter(method=self._jitter_method)
self._data.setflags(write=False)
def jitter(self, method=None):
"""
Jitter the data so the density of points can be easily seen in a
scatter plot.
:param method: None | 'uniform':
* None: No jittering is done (or any jittering is undone).
* uniform: A unformly distributed random variable (-0.5, 0.5)
is applied to each point.
:return: None
"""
if method not in set(['uniform', None]):
raise ValueError('%s jitter not supported' % method)
self._jitter_method = method
seed = 1234567890
rand_state = np.random.RandomState(seed)
if (self._jitter_method is None) and self._is_jittered:
self._update_data()
elif (self._jitter_method == 'uniform') and not self._is_jittered:
iswrite = self._data.flags['WRITEABLE']
self._data.setflags(write=True)
self._data += rand_state.uniform(-0.5, 0.5, size=self._data.shape)
self._is_jittered = True
self._data.setflags(write=iswrite)
def to_series(self, **kwargs):
""" Convert into a pandas.Series object.
This will be converted as a dtype=np.object!
:param kwargs: All kwargs are passed to the Series constructor.
:return: pandas.Series
"""
return pd.Series(self._categorical_data.ravel(),
dtype=np.object, **kwargs)
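# --- Illustrative sketch (editor addition, not part of the original module) ---
# ``Component.autotyped`` above inspects the input array: purely numeric data is
# wrapped in a plain Component, while object/string data (or string data that is
# mostly non-numeric) becomes a CategoricalComponent.  A minimal demonstration via
# a hypothetical helper, assuming only numpy and the two classes defined in this
# module:
def _autotyped_sketch():
    numeric = Component.autotyped([1.0, 2.0, 3.0])
    labels = Component.autotyped(['a', 'b', 'a'])
    # -> (False, True): only the second component reports itself as categorical
    return numeric.categorical, labels.categorical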
class Data(object):
"""The basic data container in Glue.
The data object stores data as a collection of
:class:`~glue.core.data.Component` objects. Each component stored in a
dataset must have the same shape.
Catalog data sets are stored such that each column is a distinct
1-dimensional :class:`~glue.core.data.Component`.
There are several ways to extract the actual numerical data stored in a
:class:`~glue.core.data.Data` object::
data = Data(x=[1, 2, 3], label='data')
xid = data.id['x']
data[xid]
data.get_component(xid).data
data['x'] # if 'x' is a unique component name
Likewise, datasets support :ref:`fancy indexing <numpy:basics.indexing>`::
data[xid, 0:2]
data[xid, [True, False, True]]
See also: :ref:`data_tutorial`
"""
def __init__(self, label="", **kwargs):
"""
:param label: label for data
:type label: str
Extra array-like keywords are extracted into components
"""
# Coordinate conversion object
self.coords = Coordinates()
self._shape = ()
# Components
self._components = OrderedDict()
self._pixel_component_ids = []
self._world_component_ids = []
self.id = ComponentIDDict(self)
# Subsets of the data
self._subsets = []
# Hub that the data is attached to
self.hub = None
self.style = VisualAttributes(parent=self)
self._coordinate_links = None
self.data = self
self.label = label
self.edit_subset = None
for lbl, data in sorted(kwargs.items()):
self.add_component(data, lbl)
self._key_joins = {}
@property
def subsets(self):
"""
Tuple of subsets attached to this dataset
"""
return tuple(self._subsets)
@property
def ndim(self):
"""
Dimensionality of the dataset
"""
return len(self.shape)
@property
def shape(self):
"""
Tuple of array dimensions, like :attr:`numpy.ndarray.shape`
"""
return self._shape
@property
def label(self):
""" Convenience access to data set's label """
return self._label
@label.setter
def label(self, value):
""" Set the label to value
"""
self._label = value
self.broadcast(attribute='label')
@property
def size(self):
"""
Total number of elements in the dataset.
"""
return np.product(self.shape)
@contract(component=Component)
def _check_can_add(self, component):
if isinstance(component, DerivedComponent):
return component._data is self
else:
if len(self._components) == 0:
return True
return component.shape == self.shape
@contract(cid=ComponentID, returns=np.dtype)
def dtype(self, cid):
"""Lookup the dtype for the data associated with a ComponentID"""
# grab a small piece of data
ind = tuple([slice(0, 1)] * self.ndim)
arr = self[cid, ind]
return arr.dtype
@contract(component_id=ComponentID)
def remove_component(self, component_id):
""" Remove a component from a data set
:param component_id: the component to remove
:type component_id: :class:`~glue.core.data.ComponentID`
"""
if component_id in self._components:
self._components.pop(component_id)
@contract(other='isinstance(Data)',
cid='cid_like',
cid_other='cid_like')
def join_on_key(self, other, cid, cid_other):
"""
Create an *element* mapping to another dataset, by
joining on values of ComponentIDs in both datasets.
This join allows any subsets defined on `other` to be
propagated to self.
:param other: :class:`Data` to join with
:param cid: str or :class:`ComponentID` in this dataset to use as a key
:param cid_other: ComponentID in the other dataset to use as a key
:example:
>>> d1 = Data(x=[1, 2, 3, 4, 5], k1=[0, 0, 1, 1, 2], label='d1')
>>> d2 = Data(y=[2, 4, 5, 8, 4], k2=[1, 3, 1, 2, 3], label='d2')
>>> d2.join_on_key(d1, 'k2', 'k1')
>>> s = d1.new_subset()
>>> s.subset_state = d1.id['x'] > 2
>>> s.to_mask()
array([False, False, True, True, True], dtype=bool)
>>> s = d2.new_subset()
>>> s.subset_state = d1.id['x'] > 2
>>> s.to_mask()
array([ True, False, True, True, False], dtype=bool)
The subset state selects the last 3 items in d1. These have
key values k1 of 1 and 2. Thus, the selected items in d2
are the elements where k2 = 1 or 2.
"""
_i1, _i2 = cid, cid_other
cid = self.find_component_id(cid)
cid_other = other.find_component_id(cid_other)
if cid is None:
raise ValueError("ComponentID not found in %s: %s" %
(self.label, _i1))
if cid_other is None:
raise ValueError("ComponentID not found in %s: %s" %
(other.label, _i2))
self._key_joins[other] = (cid, cid_other)
other._key_joins[self] = (cid_other, cid)
@contract(component='component_like', label='cid_like')
def add_component(self, component, label, hidden=False):
""" Add a new component to this data set.
:param component: object to add. Can be a Component,
array-like object, or ComponentLink
:param label:
The label. If this is a string,
a new :class:`ComponentID` with this label will be
created and associated with the Component
:type component: :class:`~glue.core.data.Component` or
array-like
:type label: :class:`str` or :class:`~glue.core.data.ComponentID`
:raises:
TypeError, if label is invalid
ValueError if the component has an incompatible shape
:returns:
The ComponentID associated with the newly-added component
"""
if isinstance(component, ComponentLink):
component = DerivedComponent(self, component)
if not isinstance(component, Component):
component = Component.autotyped(component)
if isinstance(component, DerivedComponent):
component.set_parent(self)
if not(self._check_can_add(component)):
raise ValueError("The dimensions of component %s are "
"incompatible with the dimensions of this data: "
"%r vs %r" % (label, component.shape, self.shape))
if isinstance(label, ComponentID):
component_id = label
else:
component_id = ComponentID(label, hidden=hidden)
is_present = component_id in self._components
self._components[component_id] = component
first_component = len(self._components) == 1
if first_component:
if isinstance(component, DerivedComponent):
raise TypeError("Cannot add a derived component as "
"first component")
self._shape = component.shape
self._create_pixel_and_world_components()
if self.hub and (not is_present):
msg = DataAddComponentMessage(self, component_id)
self.hub.broadcast(msg)
msg = ComponentsChangedMessage(self)
self.hub.broadcast(msg)
return component_id
@contract(link=ComponentLink,
label='cid_like|None',
returns=DerivedComponent)
def add_component_link(self, link, label=None):
""" Shortcut method for generating a new :class:`DerivedComponent`
from a ComponentLink object, and adding it to a data set.
:param link: :class:`~glue.core.component_link.ComponentLink`
:param label: The ComponentID or label to attach to.
:type label: :class:`~glue.core.data.ComponentID` or str
:returns:
The :class:`DerivedComponent` that was added
"""
if label is not None:
if not isinstance(label, ComponentID):
label = ComponentID(label)
link.set_to_id(label)
if link.get_to_id() is None:
raise TypeError("Cannot add component_link: "
"has no 'to' ComponentID")
dc = DerivedComponent(self, link)
to_ = link.get_to_id()
self.add_component(dc, to_)
return dc
def _create_pixel_and_world_components(self):
for i in range(self.ndim):
comp = CoordinateComponent(self, i)
label = pixel_label(i, self.ndim)
cid = self.add_component(comp, "Pixel %s" % label, hidden=True)
self._pixel_component_ids.append(cid)
if self.coords:
for i in range(self.ndim):
comp = CoordinateComponent(self, i, world=True)
label = self.coords.axis_label(i)
cid = self.add_component(comp, label, hidden=True)
self._world_component_ids.append(cid)
@property
def components(self):
""" All :class:`ComponentIDs <ComponentID>` in the Data
:rtype: list
"""
return sorted(self._components.keys(), key=lambda x: x.label)
@property
def visible_components(self):
""" :class:`ComponentIDs <ComponentID>` for all non-hidden components.
:rtype: list
"""
return [cid for cid, comp in self._components.items()
if not cid.hidden and not comp.hidden]
@property
def primary_components(self):
"""The ComponentIDs not associated with a :class:`DerivedComponent`
:rtype: list
"""
return [c for c in self.component_ids() if
not isinstance(self._components[c], DerivedComponent)]
@property
def derived_components(self):
"""The ComponentIDs for each :class:`DerivedComponent`
:rtype: list
"""
return [c for c in self.component_ids() if
isinstance(self._components[c], DerivedComponent)]
@property
def pixel_component_ids(self):
"""
The :class:`ComponentIDs <ComponentID>` for each pixel coordinate.
"""
return self._pixel_component_ids
@property
def world_component_ids(self):
"""
The :class:`ComponentIDs <ComponentID>` for each world coordinate.
"""
return self._world_component_ids
@contract(label='cid_like', returns='inst($ComponentID)|None')
def find_component_id(self, label):
""" Retrieve the ComponentID associated with a given label name.
:param label: ComponentID or string to search for
:returns:
The associated ComponentID if label is found and unique, else None
"""
result = [cid for cid in self.component_ids() if
cid.label == label or cid is label]
if len(result) == 1:
return result[0]
@property
def coordinate_links(self):
"""A list of the ComponentLinks that connect pixel and
world. If no coordinate transformation object is present,
return an empty list.
"""
if self._coordinate_links:
return self._coordinate_links
if not self.coords:
return []
if self.ndim != len(self._pixel_component_ids) or \
self.ndim != len(self._world_component_ids):
# haven't populated pixel, world coordinates yet
return []
def make_toworld_func(i):
def pix2world(*args):
return self.coords.pixel2world(*args[::-1])[::-1][i]
return pix2world
def make_topixel_func(i):
def world2pix(*args):
return self.coords.world2pixel(*args[::-1])[::-1][i]
return world2pix
result = []
for i in range(self.ndim):
link = CoordinateComponentLink(self._pixel_component_ids,
self._world_component_ids[i],
self.coords, i)
result.append(link)
link = CoordinateComponentLink(self._world_component_ids,
self._pixel_component_ids[i],
self.coords, i, pixel2world=False)
result.append(link)
self._coordinate_links = result
return result
@contract(axis=int, returns=ComponentID)
def get_pixel_component_id(self, axis):
"""Return the pixel :class:`ComponentID` associated with a given axis
"""
return self._pixel_component_ids[axis]
@contract(axis=int, returns=ComponentID)
def get_world_component_id(self, axis):
"""Return the world :class:`ComponentID` associated with a given axis
"""
return self._world_component_ids[axis]
@contract(returns='list(inst($ComponentID))')
def component_ids(self):
"""
Equivalent to :attr:`Data.components`
"""
return list(self._components.keys())
@contract(subset='isinstance(Subset)|None',
color='color|None',
label='string|None',
returns=Subset)
def new_subset(self, subset=None, color=None, label=None, **kwargs):
"""
Create a new subset, and attach to self.
.. note:: The preferred way for creating subsets is via
:meth:`~glue.core.data_collection.DataCollection.new_subset_group`.
Manually-instantiated subsets will **not** be
represented properly by the UI
:param subset: optional, reference subset or subset state.
If provided, the new subset will copy the logic of
this subset.
:returns: The new subset object
"""
nsub = len(self.subsets)
color = color or COLORS[nsub % len(COLORS)]
label = label or "%s.%i" % (self.label, nsub + 1)
new_subset = Subset(self, color=color, label=label, **kwargs)
if subset is not None:
new_subset.subset_state = subset.subset_state.copy()
self.add_subset(new_subset)
return new_subset
@contract(subset='inst($Subset, $SubsetState)')
def add_subset(self, subset):
"""Assign a pre-existing subset to this data object.
:param subset: A :class:`~glue.core.subset.Subset` or
:class:`~glue.core.subset.SubsetState` object
If input is a :class:`~glue.core.subset.SubsetState`,
it will be wrapped in a new Subset automatically
.. note:: The preferred way for creating subsets is via
:meth:`~glue.core.data_collection.DataCollection.new_subset_group`.
Manually-instantiated subsets will **not** be
represented properly by the UI
"""
if subset in self.subsets:
return # prevents infinite recursion
if isinstance(subset, SubsetState):
# auto-wrap state in subset
state = subset
subset = Subset(None)
subset.subset_state = state
self._subsets.append(subset)
if subset.data is not self:
subset.do_broadcast(False)
subset.data = self
subset.label = subset.label # hacky. disambiguates name if needed
if self.hub is not None:
msg = SubsetCreateMessage(subset)
self.hub.broadcast(msg)
subset.do_broadcast(True)
@contract(hub=Hub)
def register_to_hub(self, hub):
""" Connect to a hub.
This method usually doesn't have to be called directly, as
DataCollections manage the registration of data objects
"""
if not isinstance(hub, Hub):
raise TypeError("input is not a Hub object: %s" % type(hub))
self.hub = hub
@contract(attribute='string')
def broadcast(self, attribute):
"""
Send a :class:`~glue.core.message.DataUpdateMessage` to the hub
:param attribute: Name of an attribute that has changed (or None)
:type attribute: string
"""
if not self.hub:
return
msg = DataUpdateMessage(self, attribute=attribute)
self.hub.broadcast(msg)
@contract(old=ComponentID, new=ComponentID)
def update_id(self, old, new):
"""Reassign a component to a different :class:`ComponentID`
:param old: The old :class:`ComponentID`.
:param new: The new :class:`ComponentID`.
"""
if new is old:
return
changed = False
if old in self._components:
self._components[new] = self._components[old]
changed = True
try:
index = self._pixel_component_ids.index(old)
self._pixel_component_ids[index] = new
changed = True
except ValueError:
pass
try:
index = self._world_component_ids.index(old)
self._world_component_ids[index] = new
changed = True
except ValueError:
pass
if changed and self.hub is not None:
# promote hidden status
new._hidden = new.hidden and old.hidden
# remove old component and broadcast the change
# see #508 for discussion of this
self._components.pop(old)
msg = ComponentReplacedMessage(self, old, new)
self.hub.broadcast(msg)
def __str__(self):
s = "Data Set: %s" % self.label
s += "Number of dimensions: %i\n" % self.ndim
s += "Shape: %s\n" % ' x '.join([str(x) for x in self.shape])
s += "Components:\n"
for i, component in enumerate(self._components):
s += " %i) %s\n" % (i, component)
return s[:-1]
def __repr__(self):
return 'Data (label: %s)' % self.label
def __setattr__(self, name, value):
if name == "hub" and hasattr(self, 'hub') \
and self.hub is not value and self.hub is not None:
raise AttributeError("Data has already been assigned "
"to a different hub")
object.__setattr__(self, name, value)
def __getitem__(self, key):
""" Shortcut syntax to access the numerical data in a component.
Equivalent to:
``component = data.get_component(component_id).data``
:param key:
The component to fetch data from
:type key: :class:`~glue.core.data.ComponentID`
:returns: :class:`~numpy.ndarray`
"""
key, view = split_component_view(key)
if isinstance(key, six.string_types):
_k = key
key = self.find_component_id(key)
if key is None:
raise IncompatibleAttribute(_k)
if isinstance(key, ComponentLink):
return key.compute(self, view)
try:
comp = self._components[key]
except KeyError:
raise IncompatibleAttribute(key)
shp = view_shape(self.shape, view)
if view is not None:
result = comp[view]
else:
result = comp.data
assert result.shape == shp, \
"Component view returned bad shape: %s %s" % (result.shape, shp)
return result
def __setitem__(self, key, value):
"""
Wrapper for data.add_component()
"""
self.add_component(value, key)
@contract(component_id='cid_like|None', returns=Component)
def get_component(self, component_id):
"""Fetch the component corresponding to component_id.
:param component_id: the component_id to retrieve
"""
if component_id is None:
raise IncompatibleAttribute()
if isinstance(component_id, six.string_types):
component_id = self.id[component_id]
try:
return self._components[component_id]
except KeyError:
raise IncompatibleAttribute(component_id)
def to_dataframe(self, index=None):
""" Convert the Data object into a pandas.DataFrame object
:param index: Any 'index-like' object that can be passed to the pandas.Series constructor
:return: pandas.DataFrame
"""
h = lambda comp: self.get_component(comp).to_series(index=index)
df = pd.DataFrame(dict((comp.label, h(comp)) for comp in self.components))
order = [comp.label for comp in self.components]
return df[order]
@contract(mapping="dict(inst($Component, $ComponentID):array_like)")
def update_components(self, mapping):
"""
Change the numerical data associated with some of the Components
in this Data object.
All changes to component numerical data should use this method,
which broadcasts the state change to the appropriate places.
:param mapping: A dict mapping Components or ComponentIDs to arrays.
This method has the following restrictions:
- New components must have the same shape as old components
- Component subclasses cannot be updated.
"""
for comp, data in mapping.items():
if isinstance(comp, ComponentID):
comp = self.get_component(comp)
data = np.asarray(data)
if data.shape != self.shape:
raise ValueError("Cannot change shape of data")
comp._data = data
# alert hub of the change
if self.hub is not None:
msg = NumericalDataChangedMessage(self)
self.hub.broadcast(msg)
for subset in self.subsets:
clear_cache(subset.subset_state.to_mask)
@contract(i=int, ndim=int)
def pixel_label(i, ndim):
if ndim == 2:
return ['y', 'x'][i]
if ndim == 3:
return ['z', 'y', 'x'][i]
return "Axis %s" % i
| {
"repo_name": "JudoWill/glue",
"path": "glue/core/data.py",
"copies": "1",
"size": "36278",
"license": "bsd-3-clause",
"hash": 4162764025858854000,
"line_mean": 30.4367417678,
"line_max": 97,
"alpha_frac": 0.5859474061,
"autogenerated": false,
"ratio": 4.263485721001293,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5349433127101293,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import operator
import numbers
import numpy as np
from .visual import VisualAttributes, RED
from .decorators import memoize
from .message import SubsetDeleteMessage, SubsetUpdateMessage
from .exceptions import IncompatibleAttribute
from .registry import Registry
from .util import split_component_view
from ..utils import view_shape
from ..external.six import PY3
from .contracts import contract
from .roi import CategoricalRoi
__all__ = ['Subset', 'SubsetState', 'RoiSubsetState', 'CompositeSubsetState',
'OrState', 'AndState', 'XorState', 'InvertState',
'ElementSubsetState', 'RangeSubsetState']
OPSYM = {operator.ge: '>=', operator.gt: '>',
operator.le: '<=', operator.lt: '<',
operator.and_: '&', operator.or_: '|',
operator.xor: '^', operator.eq: '==',
operator.ne: '!='}
SYMOP = dict((v, k) for k, v in OPSYM.items())
class Subset(object):
"""Base class to handle subsets of data.
These objects both describe subsets of a dataset, and relay any
state changes to the hub that their parent data are assigned to.
This base class only directly implements the logic that relays
state changes back to the hub. Subclasses implement the actual
description and manipulation of data subsets
:param data:
The dataset that this subset describes
:type data: :class:`~glue.core.data.Data`
"""
@contract(data='isinstance(Data)|None',
color='color',
alpha=float,
label='string|None')
def __init__(self, data, color=RED, alpha=0.5, label=None):
""" Create a new subset object.
Note: the preferred way for creating subsets is
via DataCollection.new_subset_group. Manually-instantiated
subsets will probably *not* be represented properly by the UI
"""
self._broadcasting = False # must be first def
self.data = data
self._subset_state = None
self._label = None
self._style = None
self._setup(color, alpha, label)
@contract(color='color', alpha='float', label='string|None')
def _setup(self, color, alpha, label):
self.color = color
self.label = label # trigger disambiguation
self.style = VisualAttributes(parent=self)
self.style.markersize *= 1.5
self.style.color = color
self.style.alpha = alpha
self.subset_state = SubsetState() # calls proper setter method
@property
def subset_state(self):
return self._subset_state
@subset_state.setter
def subset_state(self, state):
if isinstance(state, np.ndarray):
if self.data.shape != state.shape:
raise ValueError("Shape of mask doesn't match shape of data")
cids = self.data.pixel_component_ids
state = MaskSubsetState(state, cids)
if not isinstance(state, SubsetState):
raise TypeError("State must be a SubsetState instance or array")
self._subset_state = state
@property
def style(self):
return self._style
@style.setter
@contract(value=VisualAttributes)
def style(self, value):
value.parent = self
self._style = value
@property
def label(self):
""" Convenience access to subset's label """
return self._label
@label.setter
def label(self, value):
"""Set the subset's label
Subset labels within a data object must be unique. The input
will be auto-disambiguated if necessary
"""
value = Registry().register(self, value, group=self.data)
self._label = value
@property
def attributes(self):
"""
Returns a tuple of the ComponentIDs that this subset
depends upon
"""
return self.subset_state.attributes
def register(self):
""" Register a subset to its data, and start broadcasting
state changes
"""
self.data.add_subset(self)
self.do_broadcast(True)
@contract(returns='array[N]')
def to_index_list(self):
"""
Convert the current subset to a list of indices. These index
the elements in the (flattened) data object that belong to the subset.
If x is the numpy array corresponding to some component.data,
the two following statements are equivalent::
x.flat[subset.to_index_list()]
x[subset.to_mask()]
Returns:
A numpy array, giving the indices of elements in the data that
belong to this subset.
Raises:
IncompatibleDataException: if an index list cannot be created
for the requested data set.
"""
try:
return self.subset_state.to_index_list(self.data)
except IncompatibleAttribute as exc:
try:
return self._to_index_list_join()
except IncompatibleAttribute:
raise exc
def _to_index_list_join(self):
return np.where(self._to_mask_join(None).flat)[0]
def _to_mask_join(self, view):
"""Conver the subset to a mask through an entity join
to another dataset. """
for other, (cid1, cid2) in self.data._key_joins.items():
if getattr(other, '_recursing', False):
continue
try:
self.data._recursing = True
s2 = Subset(other)
s2.subset_state = self.subset_state
key_right = s2.to_mask()
except IncompatibleAttribute:
continue
finally:
self.data._recursing = False
key_left = self.data[cid1, view]
result = np.in1d(key_left.ravel(),
other[cid2, key_right])
return result.reshape(key_left.shape)
raise IncompatibleAttribute
@contract(view='array_view', returns='array')
def to_mask(self, view=None):
"""
Convert the current subset to a mask.
:param view: An optional view into the dataset (e.g. a slice)
If present, the mask will pertain to the view and not the
entire dataset.
Returns:
A boolean numpy array, the same shape as the data, that
defines whether each element belongs to the subset.
"""
try:
return self.subset_state.to_mask(self.data, view)
except IncompatibleAttribute as exc:
try:
return self._to_mask_join(view)
except IncompatibleAttribute:
raise exc
@contract(value=bool)
def do_broadcast(self, value):
"""
Set whether state changes to the subset are relayed to a hub.
It can be useful to turn off broadcasting, when modifying the
subset in ways that don't impact any of the clients.
Attributes:
value: Whether the subset should broadcast state changes (True/False)
"""
object.__setattr__(self, '_broadcasting', value)
@contract(attribute='string')
def broadcast(self, attribute):
"""
Explicitly broadcast a SubsetUpdateMessage to the hub
:param attribute:
The name of the attribute (if any) that should be
broadcast as updated.
:type attribute: ``str``
"""
if not hasattr(self, 'data') or not hasattr(self.data, 'hub'):
return
if self._broadcasting and self.data.hub:
msg = SubsetUpdateMessage(self, attribute=attribute)
self.data.hub.broadcast(msg)
def delete(self):
"""Broadcast a SubsetDeleteMessage to the hub, and stop broadcasting
Also removes subset reference from parent data's subsets list
"""
dobroad = self._broadcasting and self.data is not None and \
self.data.hub is not None
self.do_broadcast(False)
if self.data is not None and self in self.data.subsets:
self.data._subsets.remove(self)
if dobroad:
msg = SubsetDeleteMessage(self)
self.data.hub.broadcast(msg)
Registry().unregister(self, group=self.data)
@contract(file_name='string')
def write_mask(self, file_name, format="fits"):
""" Write a subset mask out to file
:param file_name: name of file to write to
:param format:
Name of format to write to. Currently, only "fits" is
supported
"""
mask = np.short(self.to_mask())
if format == 'fits':
try:
from ..external.astro import fits
fits.writeto(file_name, mask, clobber=True)
except ImportError:
raise ImportError("Cannot write mask -- requires astropy")
else:
raise AttributeError("format not supported: %s" % format)
@contract(file_name='string')
def read_mask(self, file_name):
try:
from ..external.astro import fits
mask = fits.open(file_name)[0].data
except ImportError:
raise ImportError("Cannot read mask -- requires astropy")
except IOError:
raise IOError("Could not read %s (not a fits file?)" % file_name)
ind = np.where(mask.flat)[0]
state = ElementSubsetState(indices=ind)
self.subset_state = state
def __del__(self):
self.delete()
def __setattr__(self, attribute, value):
object.__setattr__(self, attribute, value)
if not attribute.startswith('_'):
self.broadcast(attribute)
def __getitem__(self, view):
""" Retrieve the elements from a data view within the subset
:param view: View of the data. See data.__getitem__ for details
"""
c, v = split_component_view(view)
ma = self.to_mask(v)
return self.data[view][ma]
@contract(other_subset='isinstance(Subset)')
def paste(self, other_subset):
"""paste subset state from other_subset onto self """
state = other_subset.subset_state.copy()
self.subset_state = state
def __str__(self):
dlabel = "(no data)"
if self.data is not None:
dlabel = "(data: %s)" % self.data.label
slabel = "Subset: (no label)"
if self.label:
slabel = "Subset: %s" % self.label
return "%s %s" % (slabel, dlabel)
def __repr__(self):
return self.__str__()
@contract(other='isinstance(Subset)', returns='isinstance(Subset)')
def __or__(self, other):
return _combine([self, other], operator.or_)
@contract(other='isinstance(Subset)', returns='isinstance(Subset)')
def __and__(self, other):
return _combine([self, other], operator.and_)
@contract(returns='isinstance(Subset)')
def __invert__(self):
return _combine([self], operator.invert)
@contract(other='isinstance(Subset)', returns='isinstance(Subset)')
def __xor__(self, other):
return _combine([self, other], operator.xor)
def __eq__(self, other):
if not isinstance(other, Subset):
return False
# XXX need to add equality specification for subset states
return (self.subset_state == other.subset_state and
self.style == other.style)
def state_as_mask(self):
"""
Convert the current SubsetState to a MaskSubsetState
"""
try:
m = self.to_mask()
except IncompatibleAttribute:
m = np.zeros(self.data.shape, dtype=np.bool)
cids = self.data.pixel_component_ids
return MaskSubsetState(m, cids)
# In Python 2 we need to do this explicitly
def __ne__(self, other):
return not self.__eq__(other)
# In Python 3, if __eq__ is defined, then __hash__ has to be re-defined
if PY3:
__hash__ = object.__hash__
class SubsetState(object):
def __init__(self):
pass
@property
def attributes(self):
return tuple()
@property
def subset_state(self): # convenience method, mimic interface of Subset
return self
@contract(data='isinstance(Data)')
def to_index_list(self, data):
return np.where(self.to_mask(data).flat)[0]
@contract(data='isinstance(Data)', view='array_view')
def to_mask(self, data, view=None):
shp = view_shape(data.shape, view)
return np.zeros(shp, dtype=bool)
@contract(returns='isinstance(SubsetState)')
def copy(self):
return SubsetState()
@contract(other_state='isinstance(SubsetState)',
returns='isinstance(SubsetState)')
def __or__(self, other_state):
return OrState(self, other_state)
@contract(other_state='isinstance(SubsetState)',
returns='isinstance(SubsetState)')
def __and__(self, other_state):
return AndState(self, other_state)
@contract(returns='isinstance(SubsetState)')
def __invert__(self):
return InvertState(self)
@contract(other_state='isinstance(SubsetState)',
returns='isinstance(SubsetState)')
def __xor__(self, other_state):
return XorState(self, other_state)
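# --- Illustrative sketch (editor addition, not part of the original module) ---
# SubsetState objects compose with the bitwise operators defined above: ``|``,
# ``&`` and ``^`` build Or/And/Xor composite states and ``~`` builds an
# InvertState, all of which defer the actual mask computation to ``to_mask``.
# A minimal demonstration via a hypothetical helper, using only classes defined
# in this module:
def _subset_state_composition_sketch():
    a = SubsetState()
    b = SubsetState()
    combined = (a | b) & ~a      # a CompositeSubsetState tree, evaluated lazily
    return isinstance(combined, AndState), isinstance(~a, InvertState)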
class RoiSubsetState(SubsetState):
def __init__(self, xatt=None, yatt=None, roi=None):
super(RoiSubsetState, self).__init__()
self.xatt = xatt
self.yatt = yatt
self.roi = roi
@property
def attributes(self):
return (self.xatt, self.yatt)
@memoize
@contract(data='isinstance(Data)', view='array_view')
def to_mask(self, data, view=None):
x = data[self.xatt, view]
y = data[self.yatt, view]
result = self.roi.contains(x, y)
assert x.shape == result.shape
return result
def copy(self):
result = RoiSubsetState()
result.xatt = self.xatt
result.yatt = self.yatt
result.roi = self.roi
return result
class CategoricalRoiSubsetState(SubsetState):
def __init__(self, att=None, roi=None):
super(CategoricalRoiSubsetState, self).__init__()
self.att = att
self.roi = roi
@property
def attributes(self):
return self.att,
@memoize
@contract(data='isinstance(Data)', view='array_view')
def to_mask(self, data, view=None):
x = data.get_component(self.att)._categorical_data[view]
result = self.roi.contains(x, None)
assert x.shape == result.shape
return result.ravel()
def copy(self):
result = CategoricalRoiSubsetState()
result.att = self.att
result.roi = self.roi
return result
@staticmethod
def from_range(component, att, lo, hi):
roi = CategoricalRoi.from_range(component, lo, hi)
subset = CategoricalRoiSubsetState(roi=roi,
att=att)
return subset
class RangeSubsetState(SubsetState):
def __init__(self, lo, hi, att=None):
super(RangeSubsetState, self).__init__()
self.lo = lo
self.hi = hi
self.att = att
@property
def attributes(self):
return (self.att,)
@contract(data='isinstance(Data)', view='array_view')
def to_mask(self, data, view=None):
x = data[self.att, view]
result = (x >= self.lo) & (x <= self.hi)
return result
def copy(self):
return RangeSubsetState(self.lo, self.hi, self.att)
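# --- Illustrative sketch (editor addition, not part of the original module) ---
# RangeSubsetState.to_mask above is just an elementwise window test on one
# attribute.  The same logic with a bare numpy array (no Data object involved)
# looks like the hypothetical helper below; lo and hi are inclusive on both ends,
# as in the class above.
def _range_mask_sketch(values, lo, hi):
    x = np.asarray(values)
    return (x >= lo) & (x <= hi)

# e.g. _range_mask_sketch([1, 5, 10], 2, 10) -> array([False,  True,  True])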
class CompositeSubsetState(SubsetState):
op = None
def __init__(self, state1, state2=None):
super(CompositeSubsetState, self).__init__()
self.state1 = state1.copy()
if state2:
state2 = state2.copy()
self.state2 = state2
def copy(self):
return type(self)(self.state1, self.state2)
@property
def attributes(self):
att = self.state1.attributes
if self.state2 is not None:
att += self.state2.attributes
return tuple(sorted(set(att)))
@memoize
@contract(data='isinstance(Data)', view='array_view')
def to_mask(self, data, view=None):
return self.op(self.state1.to_mask(data, view),
self.state2.to_mask(data, view))
def __str__(self):
sym = OPSYM.get(self.op, self.op)
return "(%s %s %s)" % (self.state1, sym, self.state2)
class OrState(CompositeSubsetState):
op = operator.or_
class AndState(CompositeSubsetState):
op = operator.and_
class XorState(CompositeSubsetState):
op = operator.xor
class InvertState(CompositeSubsetState):
@memoize
@contract(data='isinstance(Data)', view='array_view')
def to_mask(self, data, view=None):
return ~self.state1.to_mask(data, view)
def __str__(self):
return "(~%s)" % self.state1
class MaskSubsetState(SubsetState):
"""
    A subset defined by a boolean pixel mask
"""
def __init__(self, mask, cids):
"""
        :param mask: Boolean ndarray
        :param cids: List of ComponentIDs, defining the pixel coordinate space of the mask
"""
self.cids = cids
self.mask = mask
def to_mask(self, data, view=None):
view = view or slice(None)
# shortcut for data on the same pixel grid
if data.pixel_component_ids == self.cids:
return self.mask[view].copy()
# locate each element of data in the coordinate system of the mask
        vals = [data[c, view].astype(int) for c in self.cids]
result = self.mask[vals]
for v, n in zip(vals, data.shape):
result &= ((v >= 0) & (v < n))
return result
def __gluestate__(self, context):
return dict(cids=[context.id(c) for c in self.cids],
mask=context.do(self.mask))
@classmethod
def __setgluestate__(cls, rec, context):
return cls(context.object(rec['mask']),
[context.object(c) for c in rec['cids']])
class CategorySubsetState(SubsetState):
def __init__(self, attribute, values):
super(CategorySubsetState, self).__init__()
self._attribute = attribute
self._values = np.asarray(values).ravel()
@memoize
def to_mask(self, data, view=None):
vals = data[self._attribute, view]
result = np.in1d(vals.ravel(), self._values)
return result.reshape(vals.shape)
def copy(self):
return CategorySubsetState(self._attribute, self._values.copy())
def __gluestate__(self, context):
return dict(att=context.id(self._attribute),
vals=context.do(self._values))
@classmethod
def __setgluestate__(cls, rec, context):
return cls(context.object(rec['att']),
context.object(rec['vals']))
class ElementSubsetState(SubsetState):
def __init__(self, indices=None):
super(ElementSubsetState, self).__init__()
self._indices = indices
@memoize
def to_mask(self, data, view=None):
# XXX this is inefficient for views
result = np.zeros(data.shape, dtype=bool)
if self._indices is not None:
result.flat[self._indices] = True
if view is not None:
result = result[view]
return result
def copy(self):
return ElementSubsetState(self._indices)
class InequalitySubsetState(SubsetState):
def __init__(self, left, right, op):
from .component_link import ComponentLink
super(InequalitySubsetState, self).__init__()
from .data import ComponentID
valid_ops = [operator.gt, operator.ge,
operator.lt, operator.le,
operator.eq, operator.ne]
if op not in valid_ops:
raise TypeError("Invalid boolean operator: %s" % op)
if not isinstance(left, ComponentID) and not \
isinstance(left, numbers.Number) and not \
isinstance(left, ComponentLink):
raise TypeError("Input must be ComponenID or NumberType: %s"
% type(left))
if not isinstance(right, ComponentID) and not \
isinstance(right, numbers.Number) and not \
isinstance(right, ComponentLink):
raise TypeError("Input must be ComponenID or NumberType: %s"
% type(right))
self._left = left
self._right = right
self._operator = op
@property
def left(self):
return self._left
@property
def right(self):
return self._right
@property
def operator(self):
return self._operator
@memoize
def to_mask(self, data, view=None):
from .data import ComponentID
left = self._left
if not isinstance(self._left, numbers.Number):
left = data[self._left, view]
right = self._right
if not isinstance(self._right, numbers.Number):
right = data[self._right, view]
return self._operator(left, right)
def copy(self):
return InequalitySubsetState(self._left, self._right, self._operator)
def __str__(self):
sym = OPSYM.get(self._operator, self._operator)
return "(%s %s %s)" % (self._left, sym, self._right)
def __repr__(self):
return '<%s: %s>' % (self.__class__.__name__, self)
@contract(subsets='list(isinstance(Subset))', returns=Subset)
def _combine(subsets, operator):
state = operator(*[s.subset_state for s in subsets])
result = Subset(None)
result.subset_state = state
return result
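# A minimal usage sketch (editor's illustration, not part of the original
# module): subset states compose through the boolean operators defined on
# SubsetState. Assuming ``d`` is a glue ``Data`` instance and ``cid`` one of
# its ComponentIDs:
#
#     broad = RangeSubsetState(0.0, 10.0, att=cid)
#     narrow = RangeSubsetState(2.0, 4.0, att=cid)
#     state = broad & ~narrow          # AndState(broad, InvertState(narrow))
#     mask = state.to_mask(d)          # boolean ndarray shaped like d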
| {
"repo_name": "JudoWill/glue",
"path": "glue/core/subset.py",
"copies": "1",
"size": "21709",
"license": "bsd-3-clause",
"hash": 2674665567910139000,
"line_mean": 29.4474053296,
"line_max": 90,
"alpha_frac": 0.5937168916,
"autogenerated": false,
"ratio": 4.099131419939577,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5192848311539577,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import operator
import sys
import types
PY3 = sys.version_info[0] == 3
PY2 = sys.version_info[0] == 2
if PY3:
import builtins
from queue import Queue, Empty
from itertools import zip_longest
from io import StringIO, BytesIO
from urllib.request import urlopen
from urllib.parse import urlparse
from urllib.parse import quote, unquote
unicode = str
long = int
def apply(func, args, kwargs=None):
if kwargs:
return func(*args, **kwargs)
else:
return func(*args)
range = range
operator_div = operator.truediv
else:
import __builtin__ as builtins
from Queue import Queue, Empty
from itertools import izip_longest as zip_longest
from StringIO import StringIO
from io import BytesIO
from urllib2 import urlopen
from urlparse import urlparse
from urllib import quote, unquote
unicode = unicode
long = long
apply = apply
range = xrange
operator_div = operator.div
def skip(func):
return
def bind_method(cls, name, func):
"""Bind a method to class
Parameters
----------
cls : type
class to receive bound method
name : basestring
name of method on class instance
func : function
function to be bound as method
Returns
-------
None
"""
# only python 2 has bound/unbound method issue
if not PY3:
setattr(cls, name, types.MethodType(func, None, cls))
else:
setattr(cls, name, func)
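# Usage sketch (editor's illustration, not part of the original module):
# attach a function to a class at runtime in a way that works on both
# Python 2 and Python 3.
#
#     class Foo(object):
#         pass
#
#     def greet(self):
#         return 'hello'
#
#     bind_method(Foo, 'greet', greet)
#     assert Foo().greet() == 'hello'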
| {
"repo_name": "PhE/dask",
"path": "dask/compatibility.py",
"copies": "3",
"size": "1579",
"license": "bsd-3-clause",
"hash": 1519786199977895200,
"line_mean": 22.2205882353,
"line_max": 64,
"alpha_frac": 0.6453451552,
"autogenerated": false,
"ratio": 4.373961218836565,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.001286764705882353,
"num_lines": 68
} |
from __future__ import absolute_import, division, print_function
import operator
import numpy as np
from toolz import merge
from ..base import tokenize
from ..compatibility import apply
from .core import top, dotmany, Array, eye
from .random import RandomState
def _cumsum_blocks(it):
total = 0
for x in it:
total_previous = total
total += x
yield (total_previous, total)
def _cumsum_part(last, new):
return (last[1], last[1] + new)
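# For reference (editor's note, not part of the original module): the helpers
# above turn a sequence of block heights into row-slice bounds, e.g.
#
#     >>> list(_cumsum_blocks([2, 3, 4]))
#     [(0, 2), (2, 5), (5, 9)]
#     >>> _cumsum_part((2, 5), 4)
#     (5, 9)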
def tsqr(data, name=None, compute_svd=False):
""" Direct Tall-and-Skinny QR algorithm
As presented in:
A. Benson, D. Gleich, and J. Demmel.
Direct QR factorizations for tall-and-skinny matrices in
MapReduce architectures.
IEEE International Conference on Big Data, 2013.
http://arxiv.org/abs/1301.1071
This algorithm is used to compute both the QR decomposition and the
Singular Value Decomposition. It requires that the input array have a
single column of blocks, each of which fit in memory.
If blocks are of size ``(n, k)`` then this algorithm has memory use that
scales as ``n**2 * k * nthreads``.
Parameters
----------
data: Array
compute_svd: bool
Whether to compute the SVD rather than the QR decomposition
See Also
--------
dask.array.linalg.qr - Powered by this algorithm
dask.array.linalg.svd - Powered by this algorithm
"""
if not (data.ndim == 2 and # Is a matrix
len(data.chunks[1]) == 1): # Only one column block
raise ValueError(
"Input must have the following properties:\n"
" 1. Have two dimensions\n"
" 2. Have only one column of blocks")
prefix = name or 'tsqr-' + tokenize(data, compute_svd)
prefix += '_'
m, n = data.shape
numblocks = (len(data.chunks[0]), 1)
name_qr_st1 = prefix + 'QR_st1'
dsk_qr_st1 = top(np.linalg.qr, name_qr_st1, 'ij', data.name, 'ij',
numblocks={data.name: numblocks})
# qr[0]
name_q_st1 = prefix + 'Q_st1'
dsk_q_st1 = dict(((name_q_st1, i, 0),
(operator.getitem, (name_qr_st1, i, 0), 0))
for i in range(numblocks[0]))
# qr[1]
name_r_st1 = prefix + 'R_st1'
dsk_r_st1 = dict(((name_r_st1, i, 0),
(operator.getitem, (name_qr_st1, i, 0), 1))
for i in range(numblocks[0]))
# Stacking for in-core QR computation
to_stack = [(name_r_st1, i, 0) for i in range(numblocks[0])]
name_r_st1_stacked = prefix + 'R_st1_stacked'
dsk_r_st1_stacked = {(name_r_st1_stacked, 0, 0): (np.vstack,
(tuple, to_stack))}
# In-core QR computation
name_qr_st2 = prefix + 'QR_st2'
dsk_qr_st2 = top(np.linalg.qr, name_qr_st2, 'ij', name_r_st1_stacked, 'ij',
numblocks={name_r_st1_stacked: (1, 1)})
# qr[0]
name_q_st2_aux = prefix + 'Q_st2_aux'
dsk_q_st2_aux = {(name_q_st2_aux, 0, 0): (operator.getitem,
(name_qr_st2, 0, 0), 0)}
if not any(np.isnan(c) for cs in data.chunks for c in cs):
q2_block_sizes = [min(e, n) for e in data.chunks[0]]
block_slices = [(slice(e[0], e[1]), slice(0, n))
for e in _cumsum_blocks(q2_block_sizes)]
dsk_q_blockslices = {}
else:
name_q2bs = prefix + 'q2-shape'
dsk_q2_shapes = {(name_q2bs, i): (min, (getattr, (data.name, i, 0), 'shape'))
for i in range(numblocks[0])}
dsk_n = {prefix + 'n': (operator.getitem,
(getattr, (data.name, 0, 0), 'shape'), 1)}
name_q2cs = prefix + 'q2-shape-cumsum'
dsk_q2_cumsum = {(name_q2cs, 0): [0, (name_q2bs, 0)]}
dsk_q2_cumsum.update({(name_q2cs, i): (_cumsum_part,
(name_q2cs, i - 1),
(name_q2bs, i))
for i in range(1, numblocks[0])})
name_blockslice = prefix + 'q2-blockslice'
dsk_block_slices = {(name_blockslice, i): (tuple, [
(apply, slice, (name_q2cs, i)), (slice, 0, prefix + 'n')])
for i in range(numblocks[0])}
dsk_q_blockslices = merge(dsk_n,
dsk_q2_shapes,
dsk_q2_cumsum,
dsk_block_slices)
block_slices = [(name_blockslice, i) for i in range(numblocks[0])]
name_q_st2 = prefix + 'Q_st2'
dsk_q_st2 = dict(((name_q_st2, i, 0),
(operator.getitem, (name_q_st2_aux, 0, 0), b))
for i, b in enumerate(block_slices))
# qr[1]
name_r_st2 = prefix + 'R'
dsk_r_st2 = {(name_r_st2, 0, 0): (operator.getitem, (name_qr_st2, 0, 0), 1)}
name_q_st3 = prefix + 'Q'
dsk_q_st3 = top(np.dot, name_q_st3, 'ij', name_q_st1, 'ij',
name_q_st2, 'ij', numblocks={name_q_st1: numblocks,
name_q_st2: numblocks})
dsk_q = {}
dsk_q.update(data.dask)
dsk_q.update(dsk_qr_st1)
dsk_q.update(dsk_q_st1)
dsk_q.update(dsk_r_st1)
dsk_q.update(dsk_r_st1_stacked)
dsk_q.update(dsk_qr_st2)
dsk_q.update(dsk_q_st2_aux)
dsk_q.update(dsk_q_st2)
dsk_q.update(dsk_q_st3)
dsk_q.update(dsk_q_blockslices)
dsk_r = {}
dsk_r.update(data.dask)
dsk_r.update(dsk_qr_st1)
dsk_r.update(dsk_r_st1)
dsk_r.update(dsk_r_st1_stacked)
dsk_r.update(dsk_qr_st2)
dsk_r.update(dsk_r_st2)
if not compute_svd:
qq, rr = np.linalg.qr(np.ones(shape=(1, 1), dtype=data.dtype))
q = Array(dsk_q, name_q_st3, shape=data.shape, chunks=data.chunks,
dtype=qq.dtype)
r = Array(dsk_r, name_r_st2, shape=(n, n), chunks=(n, n),
dtype=rr.dtype)
return q, r
else:
# In-core SVD computation
name_svd_st2 = prefix + 'SVD_st2'
dsk_svd_st2 = top(np.linalg.svd, name_svd_st2, 'ij', name_r_st2, 'ij',
numblocks={name_r_st2: (1, 1)})
# svd[0]
name_u_st2 = prefix + 'U_st2'
dsk_u_st2 = {(name_u_st2, 0, 0): (operator.getitem,
(name_svd_st2, 0, 0), 0)}
# svd[1]
name_s_st2 = prefix + 'S'
dsk_s_st2 = {(name_s_st2, 0): (operator.getitem,
(name_svd_st2, 0, 0), 1)}
# svd[2]
name_v_st2 = prefix + 'V'
dsk_v_st2 = {(name_v_st2, 0, 0): (operator.getitem,
(name_svd_st2, 0, 0), 2)}
# Q * U
name_u_st4 = prefix + 'U'
dsk_u_st4 = top(dotmany, name_u_st4, 'ij', name_q_st3, 'ik',
name_u_st2, 'kj', numblocks={name_q_st3: numblocks,
name_u_st2: (1, 1)})
dsk_u = {}
dsk_u.update(dsk_q)
dsk_u.update(dsk_r)
dsk_u.update(dsk_svd_st2)
dsk_u.update(dsk_u_st2)
dsk_u.update(dsk_u_st4)
dsk_s = {}
dsk_s.update(dsk_r)
dsk_s.update(dsk_svd_st2)
dsk_s.update(dsk_s_st2)
dsk_v = {}
dsk_v.update(dsk_r)
dsk_v.update(dsk_svd_st2)
dsk_v.update(dsk_v_st2)
uu, ss, vv = np.linalg.svd(np.ones(shape=(1, 1), dtype=data.dtype))
u = Array(dsk_u, name_u_st4, shape=data.shape, chunks=data.chunks,
dtype=uu.dtype)
s = Array(dsk_s, name_s_st2, shape=(n,), chunks=((n,),), dtype=ss.dtype)
v = Array(dsk_v, name_v_st2, shape=(n, n), chunks=((n,), (n,)),
dtype=vv.dtype)
return u, s, v
def compression_level(n, q, oversampling=10, min_subspace_size=20):
""" Compression level to use in svd_compressed
    Given the size ``n`` of a space, compress it to one of size
``q`` plus oversampling.
The oversampling allows for greater flexibility in finding an
appropriate subspace, a low value is often enough (10 is already a
very conservative choice, it can be further reduced).
``q + oversampling`` should not be larger than ``n``. In this
specific implementation, ``q + oversampling`` is at least
``min_subspace_size``.
>>> compression_level(100, 10)
20
"""
return min(max(min_subspace_size, q + oversampling), n)
def compression_matrix(data, q, n_power_iter=0, seed=None):
""" Randomly sample matrix to find most active subspace
This compression matrix returned by this algorithm can be used to
compute both the QR decomposition and the Singular Value
Decomposition.
Parameters
----------
data: Array
q: int
Size of the desired subspace (the actual size will be bigger,
because of oversampling, see ``da.linalg.compression_level``)
n_power_iter: int
number of power iterations, useful when the singular values of
the input matrix decay very slowly.
References
----------
N. Halko, P. G. Martinsson, and J. A. Tropp.
Finding structure with randomness: Probabilistic algorithms for
constructing approximate matrix decompositions.
SIAM Rev., Survey and Review section, Vol. 53, num. 2,
pp. 217-288, June 2011
http://arxiv.org/abs/0909.4061
"""
n = data.shape[1]
comp_level = compression_level(n, q)
state = RandomState(seed)
omega = state.standard_normal(size=(n, comp_level), chunks=(data.chunks[1],
(comp_level,)))
mat_h = data.dot(omega)
for j in range(n_power_iter):
mat_h = data.dot(data.T.dot(mat_h))
q, _ = tsqr(mat_h)
return q.T
def svd_compressed(a, k, n_power_iter=0, seed=None, name=None):
""" Randomly compressed rank-k thin Singular Value Decomposition.
This computes the approximate singular value decomposition of a large
array. This algorithm is generally faster than the normal algorithm
but does not provide exact results. One can balance between
performance and accuracy with input parameters (see below).
Parameters
----------
a: Array
Input array
k: int
Rank of the desired thin SVD decomposition.
n_power_iter: int
Number of power iterations, useful when the singular values
decay slowly. Error decreases exponentially as n_power_iter
increases. In practice, set n_power_iter <= 4.
Examples
--------
>>> u, s, vt = svd_compressed(x, 20) # doctest: +SKIP
Returns
-------
u: Array, unitary / orthogonal
s: Array, singular values in decreasing order (largest first)
v: Array, unitary / orthogonal
References
----------
N. Halko, P. G. Martinsson, and J. A. Tropp.
Finding structure with randomness: Probabilistic algorithms for
constructing approximate matrix decompositions.
SIAM Rev., Survey and Review section, Vol. 53, num. 2,
pp. 217-288, June 2011
http://arxiv.org/abs/0909.4061
"""
comp = compression_matrix(a, k, n_power_iter=n_power_iter, seed=seed)
a_compressed = comp.dot(a)
v, s, u = tsqr(a_compressed.T, name, compute_svd=True)
u = comp.T.dot(u)
v = v.T
u = u[:, :k]
s = s[:k]
v = v[:k, :]
return u, s, v
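# Editor's note: the helper below is an in-memory NumPy sketch of the
# randomized projection scheme from Halko et al. that ``svd_compressed``
# distributes; it is illustrative only and not part of the original module.
def _svd_compressed_reference(a, k, oversampling=10, n_power_iter=0, seed=None):
    """Dense reference implementation of a randomized thin SVD of rank ``k``."""
    rng = np.random.RandomState(seed)
    n = a.shape[1]
    # sample the range of ``a`` with a random Gaussian test matrix
    omega = rng.standard_normal(size=(n, min(n, k + oversampling)))
    mat_h = a.dot(omega)
    for _ in range(n_power_iter):
        # optional power iterations sharpen the subspace when singular
        # values decay slowly
        mat_h = a.dot(a.T.dot(mat_h))
    q, _ = np.linalg.qr(mat_h)     # orthonormal basis of the sampled range
    b = q.T.dot(a)                 # small projected matrix
    u_b, s, v = np.linalg.svd(b, full_matrices=False)
    u = q.dot(u_b)
    return u[:, :k], s[:k], v[:k, :]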
def qr(a, name=None):
"""
Compute the qr factorization of a matrix.
Examples
--------
>>> q, r = da.linalg.qr(x) # doctest: +SKIP
Returns
-------
q: Array, orthonormal
r: Array, upper-triangular
See Also
--------
np.linalg.qr : Equivalent NumPy Operation
dask.array.linalg.tsqr: Actual implementation with citation
"""
return tsqr(a, name)
def svd(a, name=None):
"""
Compute the singular value decomposition of a matrix.
Examples
--------
>>> u, s, v = da.linalg.svd(x) # doctest: +SKIP
Returns
-------
u: Array, unitary / orthogonal
s: Array, singular values in decreasing order (largest first)
v: Array, unitary / orthogonal
See Also
--------
np.linalg.svd : Equivalent NumPy Operation
dask.array.linalg.tsqr: Actual implementation with citation
"""
return tsqr(a, name, compute_svd=True)
def _solve_triangular_lower(a, b):
import scipy.linalg
return scipy.linalg.solve_triangular(a, b, lower=True)
def lu(a):
"""
Compute the lu decomposition of a matrix.
Examples
--------
>>> p, l, u = da.linalg.lu(x) # doctest: +SKIP
Returns
-------
p: Array, permutation matrix
l: Array, lower triangular matrix with unit diagonal.
u: Array, upper triangular matrix
"""
import scipy.linalg
if a.ndim != 2:
raise ValueError('Dimension must be 2 to perform lu decomposition')
xdim, ydim = a.shape
if xdim != ydim:
raise ValueError('Input must be a square matrix to perform lu decomposition')
if not len(set(a.chunks[0] + a.chunks[1])) == 1:
msg = ('All chunks must be a square matrix to perform lu decomposition. '
'Use .rechunk method to change the size of chunks.')
raise ValueError(msg)
vdim = len(a.chunks[0])
hdim = len(a.chunks[1])
token = tokenize(a)
name_lu = 'lu-lu-' + token
name_p = 'lu-p-' + token
name_l = 'lu-l-' + token
name_u = 'lu-u-' + token
# for internal calculation
name_p_inv = 'lu-p-inv-' + token
name_l_permuted = 'lu-l-permute-' + token
name_u_transposed = 'lu-u-transpose-' + token
name_plu_dot = 'lu-plu-dot-' + token
name_lu_dot = 'lu-lu-dot-' + token
dsk = {}
for i in range(min(vdim, hdim)):
target = (a.name, i, i)
if i > 0:
prevs = []
for p in range(i):
prev = name_plu_dot, i, p, p, i
dsk[prev] = (np.dot, (name_l_permuted, i, p), (name_u, p, i))
prevs.append(prev)
target = (operator.sub, target, (sum, prevs))
# diagonal block
dsk[name_lu, i, i] = (scipy.linalg.lu, target)
# sweep to horizontal
for j in range(i + 1, hdim):
target = (np.dot, (name_p_inv, i, i), (a.name, i, j))
if i > 0:
prevs = []
for p in range(i):
prev = name_lu_dot, i, p, p, j
dsk[prev] = (np.dot, (name_l, i, p), (name_u, p, j))
prevs.append(prev)
target = (operator.sub, target, (sum, prevs))
dsk[name_lu, i, j] = (_solve_triangular_lower,
(name_l, i, i), target)
# sweep to vertical
for k in range(i + 1, vdim):
target = (a.name, k, i)
if i > 0:
prevs = []
for p in range(i):
prev = name_plu_dot, k, p, p, i
dsk[prev] = (np.dot, (name_l_permuted, k, p), (name_u, p, i))
prevs.append(prev)
target = (operator.sub, target, (sum, prevs))
# solving x.dot(u) = target is equal to u.T.dot(x.T) = target.T
dsk[name_lu, k, i] = (np.transpose,
(_solve_triangular_lower,
(name_u_transposed, i, i),
(np.transpose, target)))
for i in range(min(vdim, hdim)):
for j in range(min(vdim, hdim)):
if i == j:
dsk[name_p, i, j] = (operator.getitem, (name_lu, i, j), 0)
dsk[name_l, i, j] = (operator.getitem, (name_lu, i, j), 1)
dsk[name_u, i, j] = (operator.getitem, (name_lu, i, j), 2)
# permuted l is required to be propagated to i > j blocks
dsk[name_l_permuted, i, j] = (np.dot, (name_p, i, j), (name_l, i, j))
dsk[name_u_transposed, i, j] = (np.transpose, (name_u, i, j))
# transposed permutation matrix is equal to its inverse
dsk[name_p_inv, i, j] = (np.transpose, (name_p, i, j))
elif i > j:
dsk[name_p, i, j] = (np.zeros, (a.chunks[0][i], a.chunks[1][j]))
# calculations are performed using permuted l,
# thus the result should be reverted by inverted (=transposed) p
# to have the same row order as diagonal blocks
dsk[name_l, i, j] = (np.dot, (name_p_inv, i, i), (name_lu, i, j))
dsk[name_u, i, j] = (np.zeros, (a.chunks[0][i], a.chunks[1][j]))
dsk[name_l_permuted, i, j] = (name_lu, i, j)
else:
dsk[name_p, i, j] = (np.zeros, (a.chunks[0][i], a.chunks[1][j]))
dsk[name_l, i, j] = (np.zeros, (a.chunks[0][i], a.chunks[1][j]))
dsk[name_u, i, j] = (name_lu, i, j)
                # l_permuted is not referenced in the upper-triangular blocks
dsk.update(a.dask)
pp, ll, uu = scipy.linalg.lu(np.ones(shape=(1, 1), dtype=a.dtype))
p = Array(dsk, name_p, shape=a.shape, chunks=a.chunks, dtype=pp.dtype)
l = Array(dsk, name_l, shape=a.shape, chunks=a.chunks, dtype=ll.dtype)
u = Array(dsk, name_u, shape=a.shape, chunks=a.chunks, dtype=uu.dtype)
return p, l, u
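# Editor's note (illustration only, not part of the original module): ignoring
# the per-block pivoting that the graph also tracks, the recursion above is the
# textbook block LU step for ``A = [[A11, A12], [A21, A22]]``:
#
#     L11, U11 = lu(A11)                                   # diagonal block
#     U12 = solve_triangular(L11, A12, lower=True)         # L11 . U12 = A12
#     L21 = solve_triangular(U11.T, A21.T, lower=True).T   # L21 . U11 = A21
#     recurse on the Schur complement  A22 - L21.dot(U12)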
def solve_triangular(a, b, lower=False):
"""
Solve the equation `a x = b` for `x`, assuming a is a triangular matrix.
Parameters
----------
a : (M, M) array_like
A triangular matrix
b : (M,) or (M, N) array_like
Right-hand side matrix in `a x = b`
lower : bool, optional
Use only data contained in the lower triangle of `a`.
Default is to use upper triangle.
Returns
-------
x : (M,) or (M, N) array
Solution to the system `a x = b`. Shape of return matches `b`.
"""
import scipy.linalg
if a.ndim != 2:
raise ValueError('a must be 2 dimensional')
if b.ndim <= 2:
if a.shape[1] != b.shape[0]:
raise ValueError('a.shape[1] and b.shape[0] must be equal')
if a.chunks[1] != b.chunks[0]:
msg = ('a.chunks[1] and b.chunks[0] must be equal. '
'Use .rechunk method to change the size of chunks.')
raise ValueError(msg)
else:
raise ValueError('b must be 1 or 2 dimensional')
vchunks = len(a.chunks[1])
hchunks = 1 if b.ndim == 1 else len(b.chunks[1])
token = tokenize(a, b, lower)
name = 'solve-triangular-' + token
# for internal calculation
# (name, i, j, k, l) corresponds to a_ij.dot(b_kl)
name_mdot = 'solve-tri-dot-' + token
def _b_init(i, j):
if b.ndim == 1:
return b.name, i
else:
return b.name, i, j
def _key(i, j):
if b.ndim == 1:
return name, i
else:
return name, i, j
dsk = {}
if lower:
for i in range(vchunks):
for j in range(hchunks):
target = _b_init(i, j)
if i > 0:
prevs = []
for k in range(i):
prev = name_mdot, i, k, k, j
dsk[prev] = (np.dot, (a.name, i, k), _key(k, j))
prevs.append(prev)
target = (operator.sub, target, (sum, prevs))
dsk[_key(i, j)] = (_solve_triangular_lower, (a.name, i, i), target)
else:
for i in range(vchunks):
for j in range(hchunks):
target = _b_init(i, j)
if i < vchunks - 1:
prevs = []
for k in range(i + 1, vchunks):
prev = name_mdot, i, k, k, j
dsk[prev] = (np.dot, (a.name, i, k), _key(k, j))
prevs.append(prev)
target = (operator.sub, target, (sum, prevs))
dsk[_key(i, j)] = (scipy.linalg.solve_triangular, (a.name, i, i), target)
dsk.update(a.dask)
dsk.update(b.dask)
res = _solve_triangular_lower(np.array([[1, 0], [1, 2]], dtype=a.dtype),
np.array([0, 1], dtype=b.dtype))
return Array(dsk, name, shape=b.shape, chunks=b.chunks, dtype=res.dtype)
def solve(a, b, sym_pos=False):
"""
Solve the equation ``a x = b`` for ``x``. By default, use LU
decomposition and forward / backward substitutions. When ``sym_pos`` is
``True``, use Cholesky decomposition.
Parameters
----------
a : (M, M) array_like
A square matrix.
b : (M,) or (M, N) array_like
Right-hand side matrix in ``a x = b``.
sym_pos : bool
Assume a is symmetric and positive definite. If ``True``, use Cholesky
decomposition.
Returns
-------
x : (M,) or (M, N) Array
Solution to the system ``a x = b``. Shape of the return matches the
shape of `b`.
"""
if sym_pos:
l, u = _cholesky(a)
else:
p, l, u = lu(a)
b = p.T.dot(b)
uy = solve_triangular(l, b, lower=True)
return solve_triangular(u, uy)
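# Editor's note (illustration only, not part of the original module): the
# dense SciPy equivalent of the LU branch above, making the forward/backward
# substitution order explicit.
#
#     import scipy.linalg
#     p, l, u = scipy.linalg.lu(a)                                  # a == p @ l @ u
#     y = scipy.linalg.solve_triangular(l, p.T.dot(b), lower=True)  # forward
#     x = scipy.linalg.solve_triangular(u, y)                       # backward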
def inv(a):
"""
Compute the inverse of a matrix with LU decomposition and
forward / backward substitutions.
Parameters
----------
a : array_like
Square matrix to be inverted.
Returns
-------
ainv : Array
Inverse of the matrix `a`.
"""
return solve(a, eye(a.shape[0], chunks=a.chunks[0][0]))
def _cholesky_lower(a):
import scipy.linalg
return scipy.linalg.cholesky(a, lower=True)
def cholesky(a, lower=False):
"""
Returns the Cholesky decomposition, :math:`A = L L^*` or
:math:`A = U^* U` of a Hermitian positive-definite matrix A.
Parameters
----------
a : (M, M) array_like
Matrix to be decomposed
lower : bool, optional
Whether to compute the upper or lower triangular Cholesky
factorization. Default is upper-triangular.
Returns
-------
c : (M, M) Array
Upper- or lower-triangular Cholesky factor of `a`.
"""
l, u = _cholesky(a)
if lower:
return l
else:
return u
def _cholesky(a):
"""
Private function to perform Cholesky decomposition, which returns both
lower and upper triangulars.
"""
import scipy.linalg
if a.ndim != 2:
raise ValueError('Dimension must be 2 to perform cholesky decomposition')
xdim, ydim = a.shape
if xdim != ydim:
raise ValueError('Input must be a square matrix to perform cholesky decomposition')
if not len(set(a.chunks[0] + a.chunks[1])) == 1:
msg = ('All chunks must be a square matrix to perform cholesky decomposition. '
'Use .rechunk method to change the size of chunks.')
raise ValueError(msg)
vdim = len(a.chunks[0])
hdim = len(a.chunks[1])
token = tokenize(a)
name = 'cholesky-' + token
# (name_lt_dot, i, j, k, l) corresponds to l_ij.dot(l_kl.T)
name_lt_dot = 'cholesky-lt-dot-' + token
# because transposed results are needed for calculation,
# we can build graph for upper triangular simultaneously
name_upper = 'cholesky-upper-' + token
    # compute the lower-triangular blocks first, because the subscripts are simpler
dsk = {}
for i in range(vdim):
for j in range(hdim):
if i < j:
dsk[name, i, j] = (np.zeros, (a.chunks[0][i], a.chunks[1][j]))
dsk[name_upper, j, i] = (name, i, j)
elif i == j:
target = (a.name, i, j)
if i > 0:
prevs = []
for p in range(i):
prev = name_lt_dot, i, p, i, p
dsk[prev] = (np.dot, (name, i, p), (name_upper, p, i))
prevs.append(prev)
target = (operator.sub, target, (sum, prevs))
dsk[name, i, i] = (_cholesky_lower, target)
dsk[name_upper, i, i] = (np.transpose, (name, i, i))
else:
# solving x.dot(L11.T) = (A21 - L20.dot(L10.T)) is equal to
# L11.dot(x.T) = A21.T - L10.dot(L20.T)
# L11.dot(x.T) = A12 - L10.dot(L02)
target = (a.name, j, i)
if j > 0:
prevs = []
for p in range(j):
prev = name_lt_dot, j, p, i, p
dsk[prev] = (np.dot, (name, j, p), (name_upper, p, i))
prevs.append(prev)
target = (operator.sub, target, (sum, prevs))
                dsk[name_upper, j, i] = (_solve_triangular_lower, (name, j, j), target)
dsk[name, i, j] = (np.transpose, (name_upper, j, i))
dsk.update(a.dask)
cho = scipy.linalg.cholesky(np.array([[1, 2], [2, 5]], dtype=a.dtype))
lower = Array(dsk, name, shape=a.shape, chunks=a.chunks, dtype=cho.dtype)
# do not use .T, because part of transposed blocks are already calculated
upper = Array(dsk, name_upper, shape=a.shape, chunks=a.chunks, dtype=cho.dtype)
return lower, upper
def _sort_decreasing(x):
x[::-1].sort()
return x
def lstsq(a, b):
"""
Return the least-squares solution to a linear matrix equation using
QR decomposition.
Solves the equation `a x = b` by computing a vector `x` that
minimizes the Euclidean 2-norm `|| b - a x ||^2`. The equation may
    be under-, well-, or over-determined (i.e., the number of
linearly independent rows of `a` can be less than, equal to, or
greater than its number of linearly independent columns). If `a`
is square and of full rank, then `x` (but for round-off error) is
the "exact" solution of the equation.
Parameters
----------
a : (M, N) array_like
"Coefficient" matrix.
b : (M,) array_like
Ordinate or "dependent variable" values.
Returns
-------
x : (N,) Array
Least-squares solution. If `b` is two-dimensional,
the solutions are in the `K` columns of `x`.
residuals : (1,) Array
Sums of residuals; squared Euclidean 2-norm for each column in
``b - a*x``.
rank : Array
Rank of matrix `a`.
s : (min(M, N),) Array
Singular values of `a`.
"""
q, r = qr(a)
x = solve_triangular(r, q.T.dot(b))
residuals = b - a.dot(x)
residuals = (residuals ** 2).sum(keepdims=True)
token = tokenize(a, b)
# r must be a triangular with single block
# rank
rname = 'lstsq-rank-' + token
rdsk = {(rname, ): (np.linalg.matrix_rank, (r.name, 0, 0))}
rdsk.update(r.dask)
# rank must be an integer
rank = Array(rdsk, rname, shape=(), chunks=(), dtype=int)
# singular
sname = 'lstsq-singular-' + token
rt = r.T
sdsk = {(sname, 0): (_sort_decreasing,
(np.sqrt,
(np.linalg.eigvals,
(np.dot, (rt.name, 0, 0), (r.name, 0, 0)))))}
sdsk.update(rt.dask)
_, _, _, ss = np.linalg.lstsq(np.array([[1, 0], [1, 2]], dtype=a.dtype),
np.array([0, 1], dtype=b.dtype))
s = Array(sdsk, sname, shape=(r.shape[0], ),
chunks=r.shape[0], dtype=ss.dtype)
return x, residuals, rank, s
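# Editor's note (illustration only, not part of the original module): the same
# QR-based least-squares recipe in plain NumPy for a tall matrix ``a``:
#
#     q, r = np.linalg.qr(a)                  # a == q @ r, r upper-triangular
#     x = np.linalg.solve(r, q.T.dot(b))      # minimiser of ||a x - b||
#     s = np.linalg.svd(r, compute_uv=False)  # singular values of a (same as those of r)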
| {
"repo_name": "chrisbarber/dask",
"path": "dask/array/linalg.py",
"copies": "2",
"size": "27391",
"license": "bsd-3-clause",
"hash": -3576410441457572400,
"line_mean": 32.9417596035,
"line_max": 91,
"alpha_frac": 0.5301741448,
"autogenerated": false,
"ratio": 3.258505829169641,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4788679973969641,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import operator
import numpy as np
import toolz
from ..base import tokenize
from ..compatibility import apply
from .. import sharedict
from .core import top, dotmany, Array, eye
from .random import RandomState
def _cumsum_blocks(it):
total = 0
for x in it:
total_previous = total
total += x
yield (total_previous, total)
def _cumsum_part(last, new):
return (last[1], last[1] + new)
def tsqr(data, name=None, compute_svd=False):
""" Direct Tall-and-Skinny QR algorithm
As presented in:
A. Benson, D. Gleich, and J. Demmel.
Direct QR factorizations for tall-and-skinny matrices in
MapReduce architectures.
IEEE International Conference on Big Data, 2013.
http://arxiv.org/abs/1301.1071
This algorithm is used to compute both the QR decomposition and the
Singular Value Decomposition. It requires that the input array have a
single column of blocks, each of which fit in memory.
If blocks are of size ``(n, k)`` then this algorithm has memory use that
scales as ``n**2 * k * nthreads``.
Parameters
----------
data: Array
compute_svd: bool
Whether to compute the SVD rather than the QR decomposition
See Also
--------
dask.array.linalg.qr - Powered by this algorithm
dask.array.linalg.svd - Powered by this algorithm
"""
if not (data.ndim == 2 and # Is a matrix
len(data.chunks[1]) == 1): # Only one column block
raise ValueError(
"Input must have the following properties:\n"
" 1. Have two dimensions\n"
" 2. Have only one column of blocks")
prefix = name or 'tsqr-' + tokenize(data, compute_svd)
prefix += '_'
m, n = data.shape
numblocks = (len(data.chunks[0]), 1)
name_qr_st1 = prefix + 'QR_st1'
dsk_qr_st1 = top(np.linalg.qr, name_qr_st1, 'ij', data.name, 'ij',
numblocks={data.name: numblocks})
# qr[0]
name_q_st1 = prefix + 'Q_st1'
dsk_q_st1 = dict(((name_q_st1, i, 0),
(operator.getitem, (name_qr_st1, i, 0), 0))
for i in range(numblocks[0]))
# qr[1]
name_r_st1 = prefix + 'R_st1'
dsk_r_st1 = dict(((name_r_st1, i, 0),
(operator.getitem, (name_qr_st1, i, 0), 1))
for i in range(numblocks[0]))
# Stacking for in-core QR computation
to_stack = [(name_r_st1, i, 0) for i in range(numblocks[0])]
name_r_st1_stacked = prefix + 'R_st1_stacked'
dsk_r_st1_stacked = {(name_r_st1_stacked, 0, 0): (np.vstack,
(tuple, to_stack))}
# In-core QR computation
name_qr_st2 = prefix + 'QR_st2'
dsk_qr_st2 = top(np.linalg.qr, name_qr_st2, 'ij', name_r_st1_stacked, 'ij',
numblocks={name_r_st1_stacked: (1, 1)})
# qr[0]
name_q_st2_aux = prefix + 'Q_st2_aux'
dsk_q_st2_aux = {(name_q_st2_aux, 0, 0): (operator.getitem,
(name_qr_st2, 0, 0), 0)}
if not any(np.isnan(c) for cs in data.chunks for c in cs):
q2_block_sizes = [min(e, n) for e in data.chunks[0]]
block_slices = [(slice(e[0], e[1]), slice(0, n))
for e in _cumsum_blocks(q2_block_sizes)]
dsk_q_blockslices = {}
else:
name_q2bs = prefix + 'q2-shape'
dsk_q2_shapes = {(name_q2bs, i): (min, (getattr, (data.name, i, 0), 'shape'))
for i in range(numblocks[0])}
dsk_n = {prefix + 'n': (operator.getitem,
(getattr, (data.name, 0, 0), 'shape'), 1)}
name_q2cs = prefix + 'q2-shape-cumsum'
dsk_q2_cumsum = {(name_q2cs, 0): [0, (name_q2bs, 0)]}
dsk_q2_cumsum.update({(name_q2cs, i): (_cumsum_part,
(name_q2cs, i - 1),
(name_q2bs, i))
for i in range(1, numblocks[0])})
name_blockslice = prefix + 'q2-blockslice'
dsk_block_slices = {(name_blockslice, i): (tuple, [
(apply, slice, (name_q2cs, i)), (slice, 0, prefix + 'n')])
for i in range(numblocks[0])}
dsk_q_blockslices = toolz.merge(dsk_n,
dsk_q2_shapes,
dsk_q2_cumsum,
dsk_block_slices)
block_slices = [(name_blockslice, i) for i in range(numblocks[0])]
name_q_st2 = prefix + 'Q_st2'
dsk_q_st2 = dict(((name_q_st2, i, 0),
(operator.getitem, (name_q_st2_aux, 0, 0), b))
for i, b in enumerate(block_slices))
# qr[1]
name_r_st2 = prefix + 'R'
dsk_r_st2 = {(name_r_st2, 0, 0): (operator.getitem, (name_qr_st2, 0, 0), 1)}
name_q_st3 = prefix + 'Q'
dsk_q_st3 = top(np.dot, name_q_st3, 'ij', name_q_st1, 'ij',
name_q_st2, 'ij', numblocks={name_q_st1: numblocks,
name_q_st2: numblocks})
dsk = sharedict.ShareDict()
dsk.update(data.dask)
dsk.update_with_key(dsk_qr_st1, key=name_qr_st1)
dsk.update_with_key(dsk_q_st1, key=name_q_st1)
dsk.update_with_key(dsk_r_st1, key=name_r_st1)
dsk.update_with_key(dsk_r_st1_stacked, key=name_r_st1_stacked)
dsk.update_with_key(dsk_qr_st2, key=name_qr_st2)
dsk.update_with_key(dsk_q_st2_aux, key=name_q_st2_aux)
dsk.update_with_key(dsk_q_st2, key=name_q_st2)
dsk.update_with_key(dsk_q_st3, key=name_q_st3)
dsk.update_with_key(dsk_q_blockslices, key=prefix + '-q-blockslices')
dsk.update_with_key(dsk_r_st2, key=name_r_st2)
if not compute_svd:
qq, rr = np.linalg.qr(np.ones(shape=(1, 1), dtype=data.dtype))
q = Array(dsk, name_q_st3,
shape=data.shape, chunks=data.chunks, dtype=qq.dtype)
r = Array(dsk, name_r_st2,
shape=(n, n), chunks=(n, n), dtype=rr.dtype)
return q, r
else:
# In-core SVD computation
name_svd_st2 = prefix + 'SVD_st2'
dsk_svd_st2 = top(np.linalg.svd, name_svd_st2, 'ij', name_r_st2, 'ij',
numblocks={name_r_st2: (1, 1)})
# svd[0]
name_u_st2 = prefix + 'U_st2'
dsk_u_st2 = {(name_u_st2, 0, 0): (operator.getitem,
(name_svd_st2, 0, 0), 0)}
# svd[1]
name_s_st2 = prefix + 'S'
dsk_s_st2 = {(name_s_st2, 0): (operator.getitem,
(name_svd_st2, 0, 0), 1)}
# svd[2]
name_v_st2 = prefix + 'V'
dsk_v_st2 = {(name_v_st2, 0, 0): (operator.getitem,
(name_svd_st2, 0, 0), 2)}
# Q * U
name_u_st4 = prefix + 'U'
dsk_u_st4 = top(dotmany, name_u_st4, 'ij', name_q_st3, 'ik',
name_u_st2, 'kj', numblocks={name_q_st3: numblocks,
name_u_st2: (1, 1)})
dsk.update_with_key(dsk_svd_st2, key=name_svd_st2)
dsk.update_with_key(dsk_u_st2, key=name_u_st2)
dsk.update_with_key(dsk_u_st4, key=name_u_st4)
dsk.update_with_key(dsk_s_st2, key=name_s_st2)
dsk.update_with_key(dsk_v_st2, key=name_v_st2)
uu, ss, vv = np.linalg.svd(np.ones(shape=(1, 1), dtype=data.dtype))
u = Array(dsk, name_u_st4, shape=data.shape, chunks=data.chunks,
dtype=uu.dtype)
s = Array(dsk, name_s_st2, shape=(n,), chunks=((n,),), dtype=ss.dtype)
v = Array(dsk, name_v_st2, shape=(n, n), chunks=((n,), (n,)),
dtype=vv.dtype)
return u, s, v
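# Usage sketch (editor's illustration, not part of the original module): tsqr
# expects a tall array with a single column of blocks.
#
#     import dask.array as da
#     x = da.random.normal(size=(10000, 20), chunks=(1000, 20))
#     q, r = tsqr(x)                       # QR of x
#     u, s, v = tsqr(x, compute_svd=True)  # thin SVD of x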
def compression_level(n, q, oversampling=10, min_subspace_size=20):
""" Compression level to use in svd_compressed
    Given the size ``n`` of a space, compress it to one of size
``q`` plus oversampling.
The oversampling allows for greater flexibility in finding an
appropriate subspace, a low value is often enough (10 is already a
very conservative choice, it can be further reduced).
``q + oversampling`` should not be larger than ``n``. In this
specific implementation, ``q + oversampling`` is at least
``min_subspace_size``.
>>> compression_level(100, 10)
20
"""
return min(max(min_subspace_size, q + oversampling), n)
def compression_matrix(data, q, n_power_iter=0, seed=None):
""" Randomly sample matrix to find most active subspace
This compression matrix returned by this algorithm can be used to
compute both the QR decomposition and the Singular Value
Decomposition.
Parameters
----------
data: Array
q: int
Size of the desired subspace (the actual size will be bigger,
because of oversampling, see ``da.linalg.compression_level``)
n_power_iter: int
number of power iterations, useful when the singular values of
the input matrix decay very slowly.
References
----------
N. Halko, P. G. Martinsson, and J. A. Tropp.
Finding structure with randomness: Probabilistic algorithms for
constructing approximate matrix decompositions.
SIAM Rev., Survey and Review section, Vol. 53, num. 2,
pp. 217-288, June 2011
http://arxiv.org/abs/0909.4061
"""
n = data.shape[1]
comp_level = compression_level(n, q)
state = RandomState(seed)
omega = state.standard_normal(size=(n, comp_level), chunks=(data.chunks[1],
(comp_level,)))
mat_h = data.dot(omega)
for j in range(n_power_iter):
mat_h = data.dot(data.T.dot(mat_h))
q, _ = tsqr(mat_h)
return q.T
def svd_compressed(a, k, n_power_iter=0, seed=None, name=None):
""" Randomly compressed rank-k thin Singular Value Decomposition.
This computes the approximate singular value decomposition of a large
array. This algorithm is generally faster than the normal algorithm
but does not provide exact results. One can balance between
performance and accuracy with input parameters (see below).
Parameters
----------
a: Array
Input array
k: int
Rank of the desired thin SVD decomposition.
n_power_iter: int
Number of power iterations, useful when the singular values
decay slowly. Error decreases exponentially as n_power_iter
increases. In practice, set n_power_iter <= 4.
Examples
--------
>>> u, s, vt = svd_compressed(x, 20) # doctest: +SKIP
Returns
-------
u: Array, unitary / orthogonal
s: Array, singular values in decreasing order (largest first)
v: Array, unitary / orthogonal
References
----------
N. Halko, P. G. Martinsson, and J. A. Tropp.
Finding structure with randomness: Probabilistic algorithms for
constructing approximate matrix decompositions.
SIAM Rev., Survey and Review section, Vol. 53, num. 2,
pp. 217-288, June 2011
http://arxiv.org/abs/0909.4061
"""
comp = compression_matrix(a, k, n_power_iter=n_power_iter, seed=seed)
a_compressed = comp.dot(a)
v, s, u = tsqr(a_compressed.T, name, compute_svd=True)
u = comp.T.dot(u)
v = v.T
u = u[:, :k]
s = s[:k]
v = v[:k, :]
return u, s, v
def qr(a, name=None):
"""
Compute the qr factorization of a matrix.
Examples
--------
>>> q, r = da.linalg.qr(x) # doctest: +SKIP
Returns
-------
q: Array, orthonormal
r: Array, upper-triangular
See Also
--------
np.linalg.qr : Equivalent NumPy Operation
dask.array.linalg.tsqr: Actual implementation with citation
"""
return tsqr(a, name)
def svd(a, name=None):
"""
Compute the singular value decomposition of a matrix.
Examples
--------
>>> u, s, v = da.linalg.svd(x) # doctest: +SKIP
Returns
-------
u: Array, unitary / orthogonal
s: Array, singular values in decreasing order (largest first)
v: Array, unitary / orthogonal
See Also
--------
np.linalg.svd : Equivalent NumPy Operation
dask.array.linalg.tsqr: Actual implementation with citation
"""
return tsqr(a, name, compute_svd=True)
def _solve_triangular_lower(a, b):
import scipy.linalg
return scipy.linalg.solve_triangular(a, b, lower=True)
def lu(a):
"""
Compute the lu decomposition of a matrix.
Examples
--------
>>> p, l, u = da.linalg.lu(x) # doctest: +SKIP
Returns
-------
p: Array, permutation matrix
l: Array, lower triangular matrix with unit diagonal.
u: Array, upper triangular matrix
"""
import scipy.linalg
if a.ndim != 2:
raise ValueError('Dimension must be 2 to perform lu decomposition')
xdim, ydim = a.shape
if xdim != ydim:
raise ValueError('Input must be a square matrix to perform lu decomposition')
if not len(set(a.chunks[0] + a.chunks[1])) == 1:
msg = ('All chunks must be a square matrix to perform lu decomposition. '
'Use .rechunk method to change the size of chunks.')
raise ValueError(msg)
vdim = len(a.chunks[0])
hdim = len(a.chunks[1])
token = tokenize(a)
name_lu = 'lu-lu-' + token
name_p = 'lu-p-' + token
name_l = 'lu-l-' + token
name_u = 'lu-u-' + token
# for internal calculation
name_p_inv = 'lu-p-inv-' + token
name_l_permuted = 'lu-l-permute-' + token
name_u_transposed = 'lu-u-transpose-' + token
name_plu_dot = 'lu-plu-dot-' + token
name_lu_dot = 'lu-lu-dot-' + token
dsk = {}
for i in range(min(vdim, hdim)):
target = (a.name, i, i)
if i > 0:
prevs = []
for p in range(i):
prev = name_plu_dot, i, p, p, i
dsk[prev] = (np.dot, (name_l_permuted, i, p), (name_u, p, i))
prevs.append(prev)
target = (operator.sub, target, (sum, prevs))
# diagonal block
dsk[name_lu, i, i] = (scipy.linalg.lu, target)
# sweep to horizontal
for j in range(i + 1, hdim):
target = (np.dot, (name_p_inv, i, i), (a.name, i, j))
if i > 0:
prevs = []
for p in range(i):
prev = name_lu_dot, i, p, p, j
dsk[prev] = (np.dot, (name_l, i, p), (name_u, p, j))
prevs.append(prev)
target = (operator.sub, target, (sum, prevs))
dsk[name_lu, i, j] = (_solve_triangular_lower,
(name_l, i, i), target)
# sweep to vertical
for k in range(i + 1, vdim):
target = (a.name, k, i)
if i > 0:
prevs = []
for p in range(i):
prev = name_plu_dot, k, p, p, i
dsk[prev] = (np.dot, (name_l_permuted, k, p), (name_u, p, i))
prevs.append(prev)
target = (operator.sub, target, (sum, prevs))
# solving x.dot(u) = target is equal to u.T.dot(x.T) = target.T
dsk[name_lu, k, i] = (np.transpose,
(_solve_triangular_lower,
(name_u_transposed, i, i),
(np.transpose, target)))
for i in range(min(vdim, hdim)):
for j in range(min(vdim, hdim)):
if i == j:
dsk[name_p, i, j] = (operator.getitem, (name_lu, i, j), 0)
dsk[name_l, i, j] = (operator.getitem, (name_lu, i, j), 1)
dsk[name_u, i, j] = (operator.getitem, (name_lu, i, j), 2)
# permuted l is required to be propagated to i > j blocks
dsk[name_l_permuted, i, j] = (np.dot, (name_p, i, j), (name_l, i, j))
dsk[name_u_transposed, i, j] = (np.transpose, (name_u, i, j))
# transposed permutation matrix is equal to its inverse
dsk[name_p_inv, i, j] = (np.transpose, (name_p, i, j))
elif i > j:
dsk[name_p, i, j] = (np.zeros, (a.chunks[0][i], a.chunks[1][j]))
# calculations are performed using permuted l,
# thus the result should be reverted by inverted (=transposed) p
# to have the same row order as diagonal blocks
dsk[name_l, i, j] = (np.dot, (name_p_inv, i, i), (name_lu, i, j))
dsk[name_u, i, j] = (np.zeros, (a.chunks[0][i], a.chunks[1][j]))
dsk[name_l_permuted, i, j] = (name_lu, i, j)
else:
dsk[name_p, i, j] = (np.zeros, (a.chunks[0][i], a.chunks[1][j]))
dsk[name_l, i, j] = (np.zeros, (a.chunks[0][i], a.chunks[1][j]))
dsk[name_u, i, j] = (name_lu, i, j)
                # l_permuted is not referenced in the upper-triangular blocks
dsk = sharedict.merge(a.dask, ('lu-' + token, dsk))
pp, ll, uu = scipy.linalg.lu(np.ones(shape=(1, 1), dtype=a.dtype))
p = Array(dsk, name_p, shape=a.shape, chunks=a.chunks, dtype=pp.dtype)
l = Array(dsk, name_l, shape=a.shape, chunks=a.chunks, dtype=ll.dtype)
u = Array(dsk, name_u, shape=a.shape, chunks=a.chunks, dtype=uu.dtype)
return p, l, u
def solve_triangular(a, b, lower=False):
"""
Solve the equation `a x = b` for `x`, assuming a is a triangular matrix.
Parameters
----------
a : (M, M) array_like
A triangular matrix
b : (M,) or (M, N) array_like
Right-hand side matrix in `a x = b`
lower : bool, optional
Use only data contained in the lower triangle of `a`.
Default is to use upper triangle.
Returns
-------
x : (M,) or (M, N) array
Solution to the system `a x = b`. Shape of return matches `b`.
"""
import scipy.linalg
if a.ndim != 2:
raise ValueError('a must be 2 dimensional')
if b.ndim <= 2:
if a.shape[1] != b.shape[0]:
raise ValueError('a.shape[1] and b.shape[0] must be equal')
if a.chunks[1] != b.chunks[0]:
msg = ('a.chunks[1] and b.chunks[0] must be equal. '
'Use .rechunk method to change the size of chunks.')
raise ValueError(msg)
else:
raise ValueError('b must be 1 or 2 dimensional')
vchunks = len(a.chunks[1])
hchunks = 1 if b.ndim == 1 else len(b.chunks[1])
token = tokenize(a, b, lower)
name = 'solve-triangular-' + token
# for internal calculation
# (name, i, j, k, l) corresponds to a_ij.dot(b_kl)
name_mdot = 'solve-tri-dot-' + token
def _b_init(i, j):
if b.ndim == 1:
return b.name, i
else:
return b.name, i, j
def _key(i, j):
if b.ndim == 1:
return name, i
else:
return name, i, j
dsk = {}
if lower:
for i in range(vchunks):
for j in range(hchunks):
target = _b_init(i, j)
if i > 0:
prevs = []
for k in range(i):
prev = name_mdot, i, k, k, j
dsk[prev] = (np.dot, (a.name, i, k), _key(k, j))
prevs.append(prev)
target = (operator.sub, target, (sum, prevs))
dsk[_key(i, j)] = (_solve_triangular_lower, (a.name, i, i), target)
else:
for i in range(vchunks):
for j in range(hchunks):
target = _b_init(i, j)
if i < vchunks - 1:
prevs = []
for k in range(i + 1, vchunks):
prev = name_mdot, i, k, k, j
dsk[prev] = (np.dot, (a.name, i, k), _key(k, j))
prevs.append(prev)
target = (operator.sub, target, (sum, prevs))
dsk[_key(i, j)] = (scipy.linalg.solve_triangular, (a.name, i, i), target)
dsk = sharedict.merge(a.dask, b.dask, (name, dsk))
res = _solve_triangular_lower(np.array([[1, 0], [1, 2]], dtype=a.dtype),
np.array([0, 1], dtype=b.dtype))
return Array(dsk, name, shape=b.shape, chunks=b.chunks, dtype=res.dtype)
def solve(a, b, sym_pos=False):
"""
Solve the equation ``a x = b`` for ``x``. By default, use LU
decomposition and forward / backward substitutions. When ``sym_pos`` is
``True``, use Cholesky decomposition.
Parameters
----------
a : (M, M) array_like
A square matrix.
b : (M,) or (M, N) array_like
Right-hand side matrix in ``a x = b``.
sym_pos : bool
Assume a is symmetric and positive definite. If ``True``, use Cholesky
decomposition.
Returns
-------
x : (M,) or (M, N) Array
Solution to the system ``a x = b``. Shape of the return matches the
shape of `b`.
"""
if sym_pos:
l, u = _cholesky(a)
else:
p, l, u = lu(a)
b = p.T.dot(b)
uy = solve_triangular(l, b, lower=True)
return solve_triangular(u, uy)
def inv(a):
"""
Compute the inverse of a matrix with LU decomposition and
forward / backward substitutions.
Parameters
----------
a : array_like
Square matrix to be inverted.
Returns
-------
ainv : Array
Inverse of the matrix `a`.
"""
return solve(a, eye(a.shape[0], chunks=a.chunks[0][0]))
def _cholesky_lower(a):
import scipy.linalg
return scipy.linalg.cholesky(a, lower=True)
def cholesky(a, lower=False):
"""
Returns the Cholesky decomposition, :math:`A = L L^*` or
:math:`A = U^* U` of a Hermitian positive-definite matrix A.
Parameters
----------
a : (M, M) array_like
Matrix to be decomposed
lower : bool, optional
Whether to compute the upper or lower triangular Cholesky
factorization. Default is upper-triangular.
Returns
-------
c : (M, M) Array
Upper- or lower-triangular Cholesky factor of `a`.
"""
l, u = _cholesky(a)
if lower:
return l
else:
return u
def _cholesky(a):
"""
Private function to perform Cholesky decomposition, which returns both
lower and upper triangulars.
"""
import scipy.linalg
if a.ndim != 2:
raise ValueError('Dimension must be 2 to perform cholesky decomposition')
xdim, ydim = a.shape
if xdim != ydim:
raise ValueError('Input must be a square matrix to perform cholesky decomposition')
if not len(set(a.chunks[0] + a.chunks[1])) == 1:
msg = ('All chunks must be a square matrix to perform cholesky decomposition. '
'Use .rechunk method to change the size of chunks.')
raise ValueError(msg)
vdim = len(a.chunks[0])
hdim = len(a.chunks[1])
token = tokenize(a)
name = 'cholesky-' + token
# (name_lt_dot, i, j, k, l) corresponds to l_ij.dot(l_kl.T)
name_lt_dot = 'cholesky-lt-dot-' + token
# because transposed results are needed for calculation,
# we can build graph for upper triangular simultaneously
name_upper = 'cholesky-upper-' + token
    # compute the lower-triangular blocks first, because the subscripts are simpler
dsk = {}
for i in range(vdim):
for j in range(hdim):
if i < j:
dsk[name, i, j] = (np.zeros, (a.chunks[0][i], a.chunks[1][j]))
dsk[name_upper, j, i] = (name, i, j)
elif i == j:
target = (a.name, i, j)
if i > 0:
prevs = []
for p in range(i):
prev = name_lt_dot, i, p, i, p
dsk[prev] = (np.dot, (name, i, p), (name_upper, p, i))
prevs.append(prev)
target = (operator.sub, target, (sum, prevs))
dsk[name, i, i] = (_cholesky_lower, target)
dsk[name_upper, i, i] = (np.transpose, (name, i, i))
else:
# solving x.dot(L11.T) = (A21 - L20.dot(L10.T)) is equal to
# L11.dot(x.T) = A21.T - L10.dot(L20.T)
# L11.dot(x.T) = A12 - L10.dot(L02)
target = (a.name, j, i)
if j > 0:
prevs = []
for p in range(j):
prev = name_lt_dot, j, p, i, p
dsk[prev] = (np.dot, (name, j, p), (name_upper, p, i))
prevs.append(prev)
target = (operator.sub, target, (sum, prevs))
                dsk[name_upper, j, i] = (_solve_triangular_lower, (name, j, j), target)
dsk[name, i, j] = (np.transpose, (name_upper, j, i))
dsk = sharedict.merge(a.dask, (name, dsk))
cho = scipy.linalg.cholesky(np.array([[1, 2], [2, 5]], dtype=a.dtype))
lower = Array(dsk, name, shape=a.shape, chunks=a.chunks, dtype=cho.dtype)
# do not use .T, because part of transposed blocks are already calculated
upper = Array(dsk, name_upper, shape=a.shape, chunks=a.chunks, dtype=cho.dtype)
return lower, upper
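# Editor's note (illustration only, not part of the original module): for a
# 2x2 block partition of a Hermitian positive-definite ``A`` the graph above
# mirrors the standard block recursion
#
#     L11 = cholesky(A11, lower=True)
#     L21 = solve_triangular(L11, A21.T, lower=True).T   # i.e. A21 . L11^{-T}
#     L22 = cholesky(A22 - L21.dot(L21.T), lower=True)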
def _sort_decreasing(x):
x[::-1].sort()
return x
def lstsq(a, b):
"""
Return the least-squares solution to a linear matrix equation using
QR decomposition.
Solves the equation `a x = b` by computing a vector `x` that
minimizes the Euclidean 2-norm `|| b - a x ||^2`. The equation may
    be under-, well-, or over-determined (i.e., the number of
linearly independent rows of `a` can be less than, equal to, or
greater than its number of linearly independent columns). If `a`
is square and of full rank, then `x` (but for round-off error) is
the "exact" solution of the equation.
Parameters
----------
a : (M, N) array_like
"Coefficient" matrix.
b : (M,) array_like
Ordinate or "dependent variable" values.
Returns
-------
x : (N,) Array
Least-squares solution. If `b` is two-dimensional,
the solutions are in the `K` columns of `x`.
residuals : (1,) Array
Sums of residuals; squared Euclidean 2-norm for each column in
``b - a*x``.
rank : Array
Rank of matrix `a`.
s : (min(M, N),) Array
Singular values of `a`.
"""
q, r = qr(a)
x = solve_triangular(r, q.T.dot(b))
residuals = b - a.dot(x)
residuals = (residuals ** 2).sum(keepdims=True)
token = tokenize(a, b)
# r must be a triangular with single block
# rank
rname = 'lstsq-rank-' + token
rdsk = {(rname, ): (np.linalg.matrix_rank, (r.name, 0, 0))}
rdsk = sharedict.merge(r.dask, (rname, rdsk))
# rank must be an integer
rank = Array(rdsk, rname, shape=(), chunks=(), dtype=int)
# singular
sname = 'lstsq-singular-' + token
rt = r.T
sdsk = {(sname, 0): (_sort_decreasing,
(np.sqrt,
(np.linalg.eigvals,
(np.dot, (rt.name, 0, 0), (r.name, 0, 0)))))}
sdsk = sharedict.merge(rt.dask, (sname, sdsk))
_, _, _, ss = np.linalg.lstsq(np.array([[1, 0], [1, 2]], dtype=a.dtype),
np.array([0, 1], dtype=b.dtype))
s = Array(sdsk, sname, shape=(r.shape[0], ),
chunks=r.shape[0], dtype=ss.dtype)
return x, residuals, rank, s
| {
"repo_name": "cpcloud/dask",
"path": "dask/array/linalg.py",
"copies": "2",
"size": "27526",
"license": "bsd-3-clause",
"hash": -2251336822752956200,
"line_mean": 33.7550505051,
"line_max": 91,
"alpha_frac": 0.533858897,
"autogenerated": false,
"ratio": 3.266018035121025,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47998769321210255,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import operator
import numpy as np
from ..base import tokenize
from .core import top, dotmany, Array, eye
from .random import RandomState
def _cumsum_blocks(it):
total = 0
for x in it:
total_previous = total
total += x
yield (total_previous, total)
def tsqr(data, name=None, compute_svd=False):
""" Direct Tall-and-Skinny QR algorithm
As presented in:
A. Benson, D. Gleich, and J. Demmel.
Direct QR factorizations for tall-and-skinny matrices in
MapReduce architectures.
IEEE International Conference on Big Data, 2013.
http://arxiv.org/abs/1301.1071
This algorithm is used to compute both the QR decomposition and the
Singular Value Decomposition. It requires that the input array have a
single column of blocks, each of which fit in memory.
If blocks are of size ``(n, k)`` then this algorithm has memory use that
scales as ``n**2 * k * nthreads``.
Parameters
----------
data: Array
compute_svd: bool
Whether to compute the SVD rather than the QR decomposition
See Also
--------
dask.array.linalg.qr - Powered by this algorithm
dask.array.linalg.svd - Powered by this algorithm
"""
if not (data.ndim == 2 and # Is a matrix
len(data.chunks[1]) == 1): # Only one column block
raise ValueError(
"Input must have the following properties:\n"
" 1. Have two dimensions\n"
" 2. Have only one column of blocks")
prefix = name or 'tsqr-' + tokenize(data, compute_svd)
prefix += '_'
m, n = data.shape
numblocks = (len(data.chunks[0]), 1)
name_qr_st1 = prefix + 'QR_st1'
dsk_qr_st1 = top(np.linalg.qr, name_qr_st1, 'ij', data.name, 'ij',
numblocks={data.name: numblocks})
# qr[0]
name_q_st1 = prefix + 'Q_st1'
dsk_q_st1 = dict(((name_q_st1, i, 0),
(operator.getitem, (name_qr_st1, i, 0), 0))
for i in range(numblocks[0]))
# qr[1]
name_r_st1 = prefix + 'R_st1'
dsk_r_st1 = dict(((name_r_st1, i, 0),
(operator.getitem, (name_qr_st1, i, 0), 1))
for i in range(numblocks[0]))
# Stacking for in-core QR computation
to_stack = [(name_r_st1, i, 0) for i in range(numblocks[0])]
name_r_st1_stacked = prefix + 'R_st1_stacked'
dsk_r_st1_stacked = {(name_r_st1_stacked, 0, 0): (np.vstack,
(tuple, to_stack))}
# In-core QR computation
name_qr_st2 = prefix + 'QR_st2'
dsk_qr_st2 = top(np.linalg.qr, name_qr_st2, 'ij', name_r_st1_stacked, 'ij',
numblocks={name_r_st1_stacked: (1, 1)})
# qr[0]
name_q_st2_aux = prefix + 'Q_st2_aux'
dsk_q_st2_aux = {(name_q_st2_aux, 0, 0): (operator.getitem,
(name_qr_st2, 0, 0), 0)}
q2_block_sizes = [min(e, n) for e in data.chunks[0]]
block_slices = [(slice(e[0], e[1]), slice(0, n))
for e in _cumsum_blocks(q2_block_sizes)]
name_q_st2 = prefix + 'Q_st2'
dsk_q_st2 = dict(((name_q_st2, i, 0),
(operator.getitem, (name_q_st2_aux, 0, 0), b))
for i, b in enumerate(block_slices))
# qr[1]
name_r_st2 = prefix + 'R'
dsk_r_st2 = {(name_r_st2, 0, 0): (operator.getitem, (name_qr_st2, 0, 0), 1)}
name_q_st3 = prefix + 'Q'
dsk_q_st3 = top(np.dot, name_q_st3, 'ij', name_q_st1, 'ij',
name_q_st2, 'ij', numblocks={name_q_st1: numblocks,
name_q_st2: numblocks})
dsk_q = {}
dsk_q.update(data.dask)
dsk_q.update(dsk_qr_st1)
dsk_q.update(dsk_q_st1)
dsk_q.update(dsk_r_st1)
dsk_q.update(dsk_r_st1_stacked)
dsk_q.update(dsk_qr_st2)
dsk_q.update(dsk_q_st2_aux)
dsk_q.update(dsk_q_st2)
dsk_q.update(dsk_q_st3)
dsk_r = {}
dsk_r.update(data.dask)
dsk_r.update(dsk_qr_st1)
dsk_r.update(dsk_r_st1)
dsk_r.update(dsk_r_st1_stacked)
dsk_r.update(dsk_qr_st2)
dsk_r.update(dsk_r_st2)
if not compute_svd:
qq, rr = np.linalg.qr(np.ones(shape=(1, 1), dtype=data.dtype))
q = Array(dsk_q, name_q_st3, shape=data.shape, chunks=data.chunks,
dtype=qq.dtype)
r = Array(dsk_r, name_r_st2, shape=(n, n), chunks=(n, n),
dtype=rr.dtype)
return q, r
else:
# In-core SVD computation
name_svd_st2 = prefix + 'SVD_st2'
dsk_svd_st2 = top(np.linalg.svd, name_svd_st2, 'ij', name_r_st2, 'ij',
numblocks={name_r_st2: (1, 1)})
# svd[0]
name_u_st2 = prefix + 'U_st2'
dsk_u_st2 = {(name_u_st2, 0, 0): (operator.getitem,
(name_svd_st2, 0, 0), 0)}
# svd[1]
name_s_st2 = prefix + 'S'
dsk_s_st2 = {(name_s_st2, 0): (operator.getitem,
(name_svd_st2, 0, 0), 1)}
# svd[2]
name_v_st2 = prefix + 'V'
dsk_v_st2 = {(name_v_st2, 0, 0): (operator.getitem,
(name_svd_st2, 0, 0), 2)}
# Q * U
name_u_st4 = prefix + 'U'
dsk_u_st4 = top(dotmany, name_u_st4, 'ij', name_q_st3, 'ik',
name_u_st2, 'kj', numblocks={name_q_st3: numblocks,
name_u_st2: (1, 1)})
dsk_u = {}
dsk_u.update(dsk_q)
dsk_u.update(dsk_r)
dsk_u.update(dsk_svd_st2)
dsk_u.update(dsk_u_st2)
dsk_u.update(dsk_u_st4)
dsk_s = {}
dsk_s.update(dsk_r)
dsk_s.update(dsk_svd_st2)
dsk_s.update(dsk_s_st2)
dsk_v = {}
dsk_v.update(dsk_r)
dsk_v.update(dsk_svd_st2)
dsk_v.update(dsk_v_st2)
uu, ss, vv = np.linalg.svd(np.ones(shape=(1, 1), dtype=data.dtype))
u = Array(dsk_u, name_u_st4, shape=data.shape, chunks=data.chunks,
dtype=uu.dtype)
s = Array(dsk_s, name_s_st2, shape=(n,), chunks=(n,), dtype=ss.dtype)
v = Array(dsk_v, name_v_st2, shape=(n, n), chunks=(n, n),
dtype=vv.dtype)
return u, s, v
def compression_level(n, q, oversampling=10, min_subspace_size=20):
""" Compression level to use in svd_compressed
    Given the size ``n`` of a space, compress it to one of size
``q`` plus oversampling.
The oversampling allows for greater flexibility in finding an
appropriate subspace, a low value is often enough (10 is already a
very conservative choice, it can be further reduced).
``q + oversampling`` should not be larger than ``n``. In this
specific implementation, ``q + oversampling`` is at least
``min_subspace_size``.
>>> compression_level(100, 10)
20
"""
return min(max(min_subspace_size, q + oversampling), n)
def compression_matrix(data, q, n_power_iter=0, seed=None):
""" Randomly sample matrix to find most active subspace
This compression matrix returned by this algorithm can be used to
compute both the QR decomposition and the Singular Value
Decomposition.
Parameters
----------
data: Array
q: int
Size of the desired subspace (the actual size will be bigger,
because of oversampling, see ``da.linalg.compression_level``)
n_power_iter: int
number of power iterations, useful when the singular values of
the input matrix decay very slowly.
References
----------
N. Halko, P. G. Martinsson, and J. A. Tropp.
Finding structure with randomness: Probabilistic algorithms for
constructing approximate matrix decompositions.
SIAM Rev., Survey and Review section, Vol. 53, num. 2,
pp. 217-288, June 2011
http://arxiv.org/abs/0909.4061
"""
n = data.shape[1]
comp_level = compression_level(n, q)
state = RandomState(seed)
omega = state.standard_normal(size=(n, comp_level), chunks=(data.chunks[1],
(comp_level,)))
mat_h = data.dot(omega)
for j in range(n_power_iter):
mat_h = data.dot(data.T.dot(mat_h))
q, _ = tsqr(mat_h)
return q.T
def svd_compressed(a, k, n_power_iter=0, seed=None, name=None):
""" Randomly compressed rank-k thin Singular Value Decomposition.
This computes the approximate singular value decomposition of a large
array. This algorithm is generally faster than the normal algorithm
but does not provide exact results. One can balance between
performance and accuracy with input parameters (see below).
Parameters
----------
a: Array
Input array
k: int
Rank of the desired thin SVD decomposition.
n_power_iter: int
Number of power iterations, useful when the singular values
decay slowly. Error decreases exponentially as n_power_iter
increases. In practice, set n_power_iter <= 4.
Examples
--------
>>> u, s, vt = svd_compressed(x, 20) # doctest: +SKIP
Returns
-------
u: Array, unitary / orthogonal
s: Array, singular values in decreasing order (largest first)
v: Array, unitary / orthogonal
References
----------
N. Halko, P. G. Martinsson, and J. A. Tropp.
Finding structure with randomness: Probabilistic algorithms for
constructing approximate matrix decompositions.
SIAM Rev., Survey and Review section, Vol. 53, num. 2,
pp. 217-288, June 2011
http://arxiv.org/abs/0909.4061
"""
comp = compression_matrix(a, k, n_power_iter=n_power_iter, seed=seed)
a_compressed = comp.dot(a)
v, s, u = tsqr(a_compressed.T, name, compute_svd=True)
u = comp.T.dot(u)
v = v.T
u = u[:, :k]
s = s[:k]
v = v[:k, :]
return u, s, v
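# Illustrative sketch, not part of the original module: one way svd_compressed
# might be exercised. The shapes, chunk sizes and helper name below are
# assumptions chosen for demonstration only.
def _example_svd_compressed():
    # Build a tall-and-skinny random dask array (RandomState is imported at
    # the top of this module) and take an approximate rank-10 SVD of it.
    x = RandomState(0).standard_normal(size=(1000, 50), chunks=(100, 50))
    u, s, v = svd_compressed(x, k=10, n_power_iter=2)
    # u has shape (1000, 10), s has shape (10,), v has shape (10, 50);
    # the factors are approximate, unlike the exact SVD from ``svd`` below.
    return u.compute(), s.compute(), v.compute()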
def qr(a, name=None):
"""
Compute the qr factorization of a matrix.
Examples
--------
>>> q, r = da.linalg.qr(x) # doctest: +SKIP
Returns
-------
q: Array, orthonormal
r: Array, upper-triangular
See Also
--------
np.linalg.qr : Equivalent NumPy Operation
dask.array.linalg.tsqr: Actual implementation with citation
"""
return tsqr(a, name)
def svd(a, name=None):
"""
Compute the singular value decomposition of a matrix.
Examples
--------
>>> u, s, v = da.linalg.svd(x) # doctest: +SKIP
Returns
-------
u: Array, unitary / orthogonal
s: Array, singular values in decreasing order (largest first)
v: Array, unitary / orthogonal
See Also
--------
np.linalg.svd : Equivalent NumPy Operation
dask.array.linalg.tsqr: Actual implementation with citation
"""
return tsqr(a, name, compute_svd=True)
def _solve_triangular_lower(a, b):
import scipy.linalg
return scipy.linalg.solve_triangular(a, b, lower=True)
def lu(a):
"""
Compute the lu decomposition of a matrix.
Examples
--------
>>> p, l, u = da.linalg.lu(x) # doctest: +SKIP
Returns
-------
p: Array, permutation matrix
l: Array, lower triangular matrix with unit diagonal.
u: Array, upper triangular matrix
"""
import scipy.linalg
if a.ndim != 2:
raise ValueError('Dimension must be 2 to perform lu decomposition')
xdim, ydim = a.shape
if xdim != ydim:
raise ValueError('Input must be a square matrix to perform lu decomposition')
if not len(set(a.chunks[0] + a.chunks[1])) == 1:
        msg = ('All chunks must be square and equally sized to perform lu '
               'decomposition. Use .rechunk method to change the size of chunks.')
raise ValueError(msg)
vdim = len(a.chunks[0])
hdim = len(a.chunks[1])
token = tokenize(a)
name_lu = 'lu-lu-' + token
name_p = 'lu-p-' + token
name_l = 'lu-l-' + token
name_u = 'lu-u-' + token
# for internal calculation
name_p_inv = 'lu-p-inv-' + token
name_l_permuted = 'lu-l-permute-' + token
name_u_transposed = 'lu-u-transpose-' + token
name_plu_dot = 'lu-plu-dot-' + token
name_lu_dot = 'lu-lu-dot-' + token
dsk = {}
for i in range(min(vdim, hdim)):
target = (a.name, i, i)
if i > 0:
prevs = []
for p in range(i):
prev = name_plu_dot, i, p, p, i
dsk[prev] = (np.dot, (name_l_permuted, i, p), (name_u, p, i))
prevs.append(prev)
target = (operator.sub, target, (sum, prevs))
# diagonal block
dsk[name_lu, i, i] = (scipy.linalg.lu, target)
# sweep to horizontal
for j in range(i + 1, hdim):
target = (np.dot, (name_p_inv, i, i), (a.name, i, j))
if i > 0:
prevs = []
for p in range(i):
prev = name_lu_dot, i, p, p, j
dsk[prev] = (np.dot, (name_l, i, p), (name_u, p, j))
prevs.append(prev)
target = (operator.sub, target, (sum, prevs))
dsk[name_lu, i, j] = (_solve_triangular_lower,
(name_l, i, i), target)
# sweep to vertical
for k in range(i + 1, vdim):
target = (a.name, k, i)
if i > 0:
prevs = []
for p in range(i):
prev = name_plu_dot, k, p, p, i
dsk[prev] = (np.dot, (name_l_permuted, k, p), (name_u, p, i))
prevs.append(prev)
target = (operator.sub, target, (sum, prevs))
            # solving x.dot(u) = target is equivalent to u.T.dot(x.T) = target.T
dsk[name_lu, k, i] = (np.transpose,
(_solve_triangular_lower,
(name_u_transposed, i, i),
(np.transpose, target)))
for i in range(min(vdim, hdim)):
for j in range(min(vdim, hdim)):
if i == j:
dsk[name_p, i, j] = (operator.getitem, (name_lu, i, j), 0)
dsk[name_l, i, j] = (operator.getitem, (name_lu, i, j), 1)
dsk[name_u, i, j] = (operator.getitem, (name_lu, i, j), 2)
# permuted l is required to be propagated to i > j blocks
dsk[name_l_permuted, i, j] = (np.dot, (name_p, i, j), (name_l, i, j))
dsk[name_u_transposed, i, j] = (np.transpose, (name_u, i, j))
# transposed permutation matrix is equal to its inverse
dsk[name_p_inv, i, j] = (np.transpose, (name_p, i, j))
elif i > j:
dsk[name_p, i, j] = (np.zeros, (a.chunks[0][i], a.chunks[1][j]))
# calculations are performed using permuted l,
# thus the result should be reverted by inverted (=transposed) p
# to have the same row order as diagonal blocks
dsk[name_l, i, j] = (np.dot, (name_p_inv, i, i), (name_lu, i, j))
dsk[name_u, i, j] = (np.zeros, (a.chunks[0][i], a.chunks[1][j]))
dsk[name_l_permuted, i, j] = (name_lu, i, j)
else:
dsk[name_p, i, j] = (np.zeros, (a.chunks[0][i], a.chunks[1][j]))
dsk[name_l, i, j] = (np.zeros, (a.chunks[0][i], a.chunks[1][j]))
dsk[name_u, i, j] = (name_lu, i, j)
                # l_permuted is not referenced in the upper-triangular blocks
dsk.update(a.dask)
pp, ll, uu = scipy.linalg.lu(np.ones(shape=(1, 1), dtype=a.dtype))
p = Array(dsk, name_p, shape=a.shape, chunks=a.chunks, dtype=pp.dtype)
l = Array(dsk, name_l, shape=a.shape, chunks=a.chunks, dtype=ll.dtype)
u = Array(dsk, name_u, shape=a.shape, chunks=a.chunks, dtype=uu.dtype)
return p, l, u
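# Illustrative sketch, not part of the original module: checking that the
# blocked LU factors reconstruct the input. The helper name and sizes below
# are assumptions for demonstration; ``lu`` itself requires a square array
# with square, equally sized chunks.
def _example_lu():
    import dask.array as da
    a_np = np.random.RandomState(0).random_sample((9, 9)) + 9 * np.eye(9)
    a = da.from_array(a_np, chunks=(3, 3))
    p, l, u = lu(a)
    # p is a permutation matrix, l is unit lower triangular, u is upper
    # triangular, and their product recovers the original matrix.
    assert np.allclose(p.dot(l).dot(u).compute(), a_np)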
def solve_triangular(a, b, lower=False):
"""
Solve the equation `a x = b` for `x`, assuming a is a triangular matrix.
Parameters
----------
a : (M, M) array_like
A triangular matrix
b : (M,) or (M, N) array_like
Right-hand side matrix in `a x = b`
lower : bool, optional
Use only data contained in the lower triangle of `a`.
Default is to use upper triangle.
Returns
-------
x : (M,) or (M, N) array
Solution to the system `a x = b`. Shape of return matches `b`.
"""
import scipy.linalg
if a.ndim != 2:
raise ValueError('a must be 2 dimensional')
if b.ndim <= 2:
if a.shape[1] != b.shape[0]:
raise ValueError('a.shape[1] and b.shape[0] must be equal')
if a.chunks[1] != b.chunks[0]:
msg = ('a.chunks[1] and b.chunks[0] must be equal. '
'Use .rechunk method to change the size of chunks.')
raise ValueError(msg)
else:
raise ValueError('b must be 1 or 2 dimensional')
vchunks = len(a.chunks[1])
hchunks = 1 if b.ndim == 1 else len(b.chunks[1])
token = tokenize(a, b, lower)
name = 'solve-triangular-' + token
# for internal calculation
# (name, i, j, k, l) corresponds to a_ij.dot(b_kl)
name_mdot = 'solve-tri-dot-' + token
def _b_init(i, j):
if b.ndim == 1:
return b.name, i
else:
return b.name, i, j
def _key(i, j):
if b.ndim == 1:
return name, i
else:
return name, i, j
dsk = {}
if lower:
for i in range(vchunks):
for j in range(hchunks):
target = _b_init(i, j)
if i > 0:
prevs = []
for k in range(i):
prev = name_mdot, i, k, k, j
dsk[prev] = (np.dot, (a.name, i, k), _key(k, j))
prevs.append(prev)
target = (operator.sub, target, (sum, prevs))
dsk[_key(i, j)] = (_solve_triangular_lower, (a.name, i, i), target)
else:
for i in range(vchunks):
for j in range(hchunks):
target = _b_init(i, j)
if i < vchunks - 1:
prevs = []
for k in range(i + 1, vchunks):
prev = name_mdot, i, k, k, j
dsk[prev] = (np.dot, (a.name, i, k), _key(k, j))
prevs.append(prev)
target = (operator.sub, target, (sum, prevs))
dsk[_key(i, j)] = (scipy.linalg.solve_triangular, (a.name, i, i), target)
dsk.update(a.dask)
dsk.update(b.dask)
res = _solve_triangular_lower(np.array([[1, 0], [1, 2]], dtype=a.dtype),
np.array([0, 1], dtype=b.dtype))
return Array(dsk, name, shape=b.shape, chunks=b.chunks, dtype=res.dtype)
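# Illustrative sketch, not part of the original module: forward substitution
# with a lower-triangular system. Sizes, chunking and the helper name are
# assumptions for demonstration; note that a.chunks[1] must match b.chunks[0],
# as checked above.
def _example_solve_triangular():
    import dask.array as da
    rng = np.random.RandomState(0)
    a_np = np.tril(rng.random_sample((6, 6))) + 6 * np.eye(6)
    b_np = rng.random_sample(6)
    a = da.from_array(a_np, chunks=(3, 3))
    b = da.from_array(b_np, chunks=3)
    x = solve_triangular(a, b, lower=True)
    assert np.allclose(a_np.dot(x.compute()), b_np)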
def solve(a, b, sym_pos=False):
"""
Solve the equation ``a x = b`` for ``x``. By default, use LU
decomposition and forward / backward substitutions. When ``sym_pos`` is
``True``, use Cholesky decomposition.
Parameters
----------
a : (M, M) array_like
A square matrix.
b : (M,) or (M, N) array_like
Right-hand side matrix in ``a x = b``.
sym_pos : bool
Assume a is symmetric and positive definite. If ``True``, use Cholesky
decomposition.
Returns
-------
x : (M,) or (M, N) Array
Solution to the system ``a x = b``. Shape of the return matches the
shape of `b`.
"""
if sym_pos:
l, u = _cholesky(a)
else:
p, l, u = lu(a)
b = p.T.dot(b)
uy = solve_triangular(l, b, lower=True)
return solve_triangular(u, uy)
def inv(a):
"""
Compute the inverse of a matrix with LU decomposition and
forward / backward substitutions.
Parameters
----------
a : array_like
Square matrix to be inverted.
Returns
-------
ainv : Array
Inverse of the matrix `a`.
"""
return solve(a, eye(a.shape[0], chunks=a.chunks[0][0]))
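# Illustrative sketch, not part of the original module: solving a dense system
# and inverting a matrix via the LU-based routines above. Sizes, chunking and
# the helper name are assumptions for demonstration.
def _example_solve_and_inv():
    import dask.array as da
    rng = np.random.RandomState(0)
    a_np = rng.random_sample((6, 6)) + 6 * np.eye(6)
    b_np = rng.random_sample(6)
    a = da.from_array(a_np, chunks=(3, 3))
    b = da.from_array(b_np, chunks=3)
    x = solve(a, b)
    assert np.allclose(a_np.dot(x.compute()), b_np)
    # inv(a) builds an identity right-hand side internally and reuses solve
    assert np.allclose(inv(a).compute().dot(a_np), np.eye(6))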
def _cholesky_lower(a):
import scipy.linalg
return scipy.linalg.cholesky(a, lower=True)
def cholesky(a, lower=False):
"""
Returns the Cholesky decomposition, :math:`A = L L^*` or
:math:`A = U^* U` of a Hermitian positive-definite matrix A.
Parameters
----------
a : (M, M) array_like
Matrix to be decomposed
lower : bool, optional
Whether to compute the upper or lower triangular Cholesky
factorization. Default is upper-triangular.
Returns
-------
c : (M, M) Array
Upper- or lower-triangular Cholesky factor of `a`.
"""
l, u = _cholesky(a)
if lower:
return l
else:
return u
def _cholesky(a):
"""
Private function to perform Cholesky decomposition, which returns both
lower and upper triangulars.
"""
import scipy.linalg
if a.ndim != 2:
raise ValueError('Dimension must be 2 to perform cholesky decomposition')
xdim, ydim = a.shape
if xdim != ydim:
raise ValueError('Input must be a square matrix to perform cholesky decomposition')
if not len(set(a.chunks[0] + a.chunks[1])) == 1:
        msg = ('All chunks must be square and equally sized to perform '
               'cholesky decomposition. Use .rechunk method to change the size of chunks.')
raise ValueError(msg)
vdim = len(a.chunks[0])
hdim = len(a.chunks[1])
token = tokenize(a)
name = 'cholesky-' + token
# (name_lt_dot, i, j, k, l) corresponds to l_ij.dot(l_kl.T)
name_lt_dot = 'cholesky-lt-dot-' + token
# because transposed results are needed for calculation,
    # we can build the graph for the upper triangular factor simultaneously
name_upper = 'cholesky-upper-' + token
    # calculate the lower triangular factor because the subscripts are simpler
dsk = {}
for i in range(vdim):
for j in range(hdim):
if i < j:
dsk[name, i, j] = (np.zeros, (a.chunks[0][i], a.chunks[1][j]))
dsk[name_upper, j, i] = (name, i, j)
elif i == j:
target = (a.name, i, j)
if i > 0:
prevs = []
for p in range(i):
prev = name_lt_dot, i, p, i, p
dsk[prev] = (np.dot, (name, i, p), (name_upper, p, i))
prevs.append(prev)
target = (operator.sub, target, (sum, prevs))
dsk[name, i, i] = (_cholesky_lower, target)
dsk[name_upper, i, i] = (np.transpose, (name, i, i))
else:
# solving x.dot(L11.T) = (A21 - L20.dot(L10.T)) is equal to
# L11.dot(x.T) = A21.T - L10.dot(L20.T)
# L11.dot(x.T) = A12 - L10.dot(L02)
target = (a.name, j, i)
if j > 0:
prevs = []
for p in range(j):
prev = name_lt_dot, j, p, i, p
dsk[prev] = (np.dot, (name, j, p), (name_upper, p, i))
prevs.append(prev)
target = (operator.sub, target, (sum, prevs))
                dsk[name_upper, j, i] = (_solve_triangular_lower, (name, j, j), target)
dsk[name, i, j] = (np.transpose, (name_upper, j, i))
dsk.update(a.dask)
cho = scipy.linalg.cholesky(np.array([[1, 2], [2, 5]], dtype=a.dtype))
lower = Array(dsk, name, shape=a.shape, chunks=a.chunks, dtype=cho.dtype)
# do not use .T, because part of transposed blocks are already calculated
upper = Array(dsk, name_upper, shape=a.shape, chunks=a.chunks, dtype=cho.dtype)
return lower, upper
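# Illustrative sketch, not part of the original module: factoring a symmetric
# positive-definite matrix with the blocked Cholesky above. Sizes, chunking
# and the helper name are assumptions for demonstration.
def _example_cholesky():
    import dask.array as da
    rng = np.random.RandomState(0)
    m = rng.random_sample((6, 6))
    a_np = m.dot(m.T) + 6 * np.eye(6)   # symmetric positive definite
    a = da.from_array(a_np, chunks=(3, 3))
    l = cholesky(a, lower=True)
    l_np = l.compute()
    assert np.allclose(l_np.dot(l_np.T), a_np)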
def _sort_decreasing(x):
x[::-1].sort()
return x
def lstsq(a, b):
"""
Return the least-squares solution to a linear matrix equation using
QR decomposition.
Solves the equation `a x = b` by computing a vector `x` that
minimizes the Euclidean 2-norm `|| b - a x ||^2`. The equation may
    be under-, well-, or over-determined (i.e., the number of
linearly independent rows of `a` can be less than, equal to, or
greater than its number of linearly independent columns). If `a`
is square and of full rank, then `x` (but for round-off error) is
the "exact" solution of the equation.
Parameters
----------
a : (M, N) array_like
"Coefficient" matrix.
b : (M,) array_like
Ordinate or "dependent variable" values.
Returns
-------
x : (N,) Array
Least-squares solution. If `b` is two-dimensional,
the solutions are in the `K` columns of `x`.
residuals : (1,) Array
Sums of residuals; squared Euclidean 2-norm for each column in
``b - a*x``.
rank : Array
Rank of matrix `a`.
s : (min(M, N),) Array
Singular values of `a`.
"""
q, r = qr(a)
x = solve_triangular(r, q.T.dot(b))
residuals = b - a.dot(x)
residuals = (residuals ** 2).sum(keepdims=True)
token = tokenize(a, b)
# r must be a triangular with single block
# rank
rname = 'lstsq-rank-' + token
rdsk = {(rname, ): (np.linalg.matrix_rank, (r.name, 0, 0))}
rdsk.update(r.dask)
# rank must be an integer
rank = Array(rdsk, rname, shape=(), chunks=(), dtype=int)
# singular
sname = 'lstsq-singular-' + token
rt = r.T
sdsk = {(sname, 0): (_sort_decreasing,
(np.sqrt,
(np.linalg.eigvals,
(np.dot, (rt.name, 0, 0), (r.name, 0, 0)))))}
sdsk.update(rt.dask)
_, _, _, ss = np.linalg.lstsq(np.array([[1, 0], [1, 2]], dtype=a.dtype),
np.array([0, 1], dtype=b.dtype))
s = Array(sdsk, sname, shape=(r.shape[0], ),
chunks=r.shape[0], dtype=ss.dtype)
return x, residuals, rank, s
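# Illustrative sketch, not part of the original module: a small least-squares
# fit using the QR-based routine above. The system is tall and skinny with a
# single column of blocks, as required by ``tsqr``; sizes and the helper name
# are assumptions for demonstration.
def _example_lstsq():
    import dask.array as da
    rng = np.random.RandomState(0)
    a_np = rng.random_sample((100, 5))
    b_np = rng.random_sample(100)
    a = da.from_array(a_np, chunks=(20, 5))
    b = da.from_array(b_np, chunks=20)
    x, residuals, rank, s = lstsq(a, b)
    assert np.allclose(x.compute(), np.linalg.lstsq(a_np, b_np)[0])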
| {
"repo_name": "jeffery-do/Vizdoombot",
"path": "doom/lib/python3.5/site-packages/dask/array/linalg.py",
"copies": "1",
"size": "25941",
"license": "mit",
"hash": 6704759274007398000,
"line_mean": 32.5588615783,
"line_max": 91,
"alpha_frac": 0.5340580548,
"autogenerated": false,
"ratio": 3.2540140491721026,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9286522388244971,
"avg_score": 0.00030994314542634265,
"num_lines": 773
} |
from __future__ import absolute_import, division, print_function
import operator
import numpy as np
from ..base import tokenize
from .core import top, dotmany, Array
from .random import RandomState
def _cumsum_blocks(it):
total = 0
for x in it:
total_previous = total
total += x
yield (total_previous, total)
def tsqr(data, name=None, compute_svd=False):
""" Direct Tall-and-Skinny QR algorithm
As presented in:
A. Benson, D. Gleich, and J. Demmel.
Direct QR factorizations for tall-and-skinny matrices in
MapReduce architectures.
IEEE International Conference on Big Data, 2013.
http://arxiv.org/abs/1301.1071
This algorithm is used to compute both the QR decomposition and the
Singular Value Decomposition. It requires that the input array have a
    single column of blocks, each of which fits in memory.
Parameters
----------
data: Array
compute_svd: bool
Whether to compute the SVD rather than the QR decomposition
See Also
--------
dask.array.linalg.qr - Powered by this algorithm
dask.array.linalg.svd - Powered by this algorithm
"""
if not (data.ndim == 2 and # Is a matrix
len(data.chunks[1]) == 1): # Only one column block
raise ValueError(
"Input must have the following properites:\n"
" 1. Have two dimensions\n"
" 2. Have only one column of blocks")
prefix = name or 'tsqr-' + tokenize(data, compute_svd)
prefix += '_'
m, n = data.shape
numblocks = (len(data.chunks[0]), 1)
name_qr_st1 = prefix + 'QR_st1'
dsk_qr_st1 = top(np.linalg.qr, name_qr_st1, 'ij', data.name, 'ij',
numblocks={data.name: numblocks})
# qr[0]
name_q_st1 = prefix + 'Q_st1'
dsk_q_st1 = dict(((name_q_st1, i, 0),
(operator.getitem, (name_qr_st1, i, 0), 0))
for i in range(numblocks[0]))
# qr[1]
name_r_st1 = prefix + 'R_st1'
dsk_r_st1 = dict(((name_r_st1, i, 0),
(operator.getitem, (name_qr_st1, i, 0), 1))
for i in range(numblocks[0]))
# Stacking for in-core QR computation
to_stack = [(name_r_st1, i, 0) for i in range(numblocks[0])]
name_r_st1_stacked = prefix + 'R_st1_stacked'
dsk_r_st1_stacked = {(name_r_st1_stacked, 0, 0): (np.vstack,
(tuple, to_stack))}
# In-core QR computation
name_qr_st2 = prefix + 'QR_st2'
dsk_qr_st2 = top(np.linalg.qr, name_qr_st2, 'ij', name_r_st1_stacked, 'ij',
numblocks={name_r_st1_stacked: (1, 1)})
# qr[0]
name_q_st2_aux = prefix + 'Q_st2_aux'
dsk_q_st2_aux = {(name_q_st2_aux, 0, 0): (operator.getitem,
(name_qr_st2, 0, 0), 0)}
q2_block_sizes = [min(e, n) for e in data.chunks[0]]
block_slices = [(slice(e[0], e[1]), slice(0, n))
for e in _cumsum_blocks(q2_block_sizes)]
name_q_st2 = prefix + 'Q_st2'
dsk_q_st2 = dict(((name_q_st2, i, 0),
(operator.getitem, (name_q_st2_aux, 0, 0), b))
for i, b in enumerate(block_slices))
# qr[1]
name_r_st2 = prefix + 'R'
dsk_r_st2 = {(name_r_st2, 0, 0): (operator.getitem, (name_qr_st2, 0, 0), 1)}
name_q_st3 = prefix + 'Q'
dsk_q_st3 = top(np.dot, name_q_st3, 'ij', name_q_st1, 'ij',
name_q_st2, 'ij', numblocks={name_q_st1: numblocks,
name_q_st2: numblocks})
dsk_q = {}
dsk_q.update(data.dask)
dsk_q.update(dsk_qr_st1)
dsk_q.update(dsk_q_st1)
dsk_q.update(dsk_r_st1)
dsk_q.update(dsk_r_st1_stacked)
dsk_q.update(dsk_qr_st2)
dsk_q.update(dsk_q_st2_aux)
dsk_q.update(dsk_q_st2)
dsk_q.update(dsk_q_st3)
dsk_r = {}
dsk_r.update(data.dask)
dsk_r.update(dsk_qr_st1)
dsk_r.update(dsk_r_st1)
dsk_r.update(dsk_r_st1_stacked)
dsk_r.update(dsk_qr_st2)
dsk_r.update(dsk_r_st2)
if not compute_svd:
q = Array(dsk_q, name_q_st3, shape=data.shape, chunks=data.chunks)
r = Array(dsk_r, name_r_st2, shape=(n, n), chunks=(n, n))
return q, r
else:
# In-core SVD computation
name_svd_st2 = prefix + 'SVD_st2'
dsk_svd_st2 = top(np.linalg.svd, name_svd_st2, 'ij', name_r_st2, 'ij',
numblocks={name_r_st2: (1, 1)})
# svd[0]
name_u_st2 = prefix + 'U_st2'
dsk_u_st2 = {(name_u_st2, 0, 0): (operator.getitem,
(name_svd_st2, 0, 0), 0)}
# svd[1]
name_s_st2 = prefix + 'S'
dsk_s_st2 = {(name_s_st2, 0): (operator.getitem,
(name_svd_st2, 0, 0), 1)}
# svd[2]
name_v_st2 = prefix + 'V'
dsk_v_st2 = {(name_v_st2, 0, 0): (operator.getitem,
(name_svd_st2, 0, 0), 2)}
# Q * U
name_u_st4 = prefix + 'U'
dsk_u_st4 = top(dotmany, name_u_st4, 'ij', name_q_st3, 'ik',
name_u_st2, 'kj', numblocks={name_q_st3: numblocks,
name_u_st2: (1, 1)})
dsk_u = {}
dsk_u.update(dsk_q)
dsk_u.update(dsk_r)
dsk_u.update(dsk_svd_st2)
dsk_u.update(dsk_u_st2)
dsk_u.update(dsk_u_st4)
dsk_s = {}
dsk_s.update(dsk_r)
dsk_s.update(dsk_svd_st2)
dsk_s.update(dsk_s_st2)
dsk_v = {}
dsk_v.update(dsk_r)
dsk_v.update(dsk_svd_st2)
dsk_v.update(dsk_v_st2)
u = Array(dsk_u, name_u_st4, shape=data.shape, chunks=data.chunks)
        s = Array(dsk_s, name_s_st2, shape=(n,), chunks=(n,))
v = Array(dsk_v, name_v_st2, shape=(n, n), chunks=(n, n))
return u, s, v
def compression_level(n, q, oversampling=10, min_subspace_size=20):
""" Compression level to use in svd_compressed
    Given the size ``n`` of a space, compress it to one of size
    ``q`` plus oversampling.
    The oversampling allows for greater flexibility in finding an
    appropriate subspace; a low value is often enough (10 is already a
    very conservative choice and can be reduced further).
``q + oversampling`` should not be larger than ``n``. In this
specific implementation, ``q + oversampling`` is at least
``min_subspace_size``.
>>> compression_level(100, 10)
20
"""
return min(max(min_subspace_size, q + oversampling), n)
def compression_matrix(data, q, n_power_iter=0, seed=None):
""" Randomly sample matrix to find most active subspace
    The compression matrix returned by this algorithm can be used to
compute both the QR decomposition and the Singular Value
Decomposition.
Parameters
----------
data: Array
q: int
Size of the desired subspace (the actual size will be bigger,
because of oversampling, see ``da.linalg.compression_level``)
n_power_iter: int
number of power iterations, useful when the singular values of
the input matrix decay very slowly.
    References
    ----------
N. Halko, P. G. Martinsson, and J. A. Tropp.
Finding structure with randomness: Probabilistic algorithms for
constructing approximate matrix decompositions.
SIAM Rev., Survey and Review section, Vol. 53, num. 2,
pp. 217-288, June 2011
http://arxiv.org/abs/0909.4061
"""
n = data.shape[1]
comp_level = compression_level(n, q)
state = RandomState(seed)
omega = state.standard_normal(size=(n, comp_level), chunks=(data.chunks[1],
(comp_level,)))
mat_h = data.dot(omega)
for j in range(n_power_iter):
mat_h = data.dot(data.T.dot(mat_h))
q, _ = tsqr(mat_h)
return q.T
def svd_compressed(a, k, n_power_iter=0, seed=None, name=None):
""" Randomly compressed rank-k thin Singular Value Decomposition.
This computes the approximate singular value decomposition of a large
array. This algorithm is generally faster than the normal algorithm
but does not provide exact results. One can balance between
performance and accuracy with input parameters (see below).
Parameters
----------
a: Array
Input array
k: int
Rank of the desired thin SVD decomposition.
n_power_iter: int
Number of power iterations, useful when the singular values
decay slowly. Error decreases exponentially as n_power_iter
increases. In practice, set n_power_iter <= 4.
    References
    ----------
N. Halko, P. G. Martinsson, and J. A. Tropp.
Finding structure with randomness: Probabilistic algorithms for
constructing approximate matrix decompositions.
SIAM Rev., Survey and Review section, Vol. 53, num. 2,
pp. 217-288, June 2011
http://arxiv.org/abs/0909.4061
Examples
--------
>>> u, s, vt = svd_compressed(x, 20) # doctest: +SKIP
Returns
-------
u: Array, unitary / orthogonal
s: Array, singular values in decreasing order (largest first)
v: Array, unitary / orthogonal
"""
comp = compression_matrix(a, k, n_power_iter=n_power_iter, seed=seed)
a_compressed = comp.dot(a)
v, s, u = tsqr(a_compressed.T, name, compute_svd=True)
u = comp.T.dot(u)
v = v.T
u = u[:, :k]
s = s[:k]
v = v[:k, :]
return u, s, v
def qr(a, name=None):
"""
Compute the qr factorization of a matrix.
Examples
--------
>>> q, r = da.linalg.qr(x) # doctest: +SKIP
Returns
-------
q: Array, orthonormal
r: Array, upper-triangular
See Also
--------
np.linalg.qr : Equivalent NumPy Operation
dask.array.linalg.tsqr: Actual implementation with citation
"""
return tsqr(a, name)
def svd(a, name=None):
"""
Compute the singular value decomposition of a matrix.
Examples
--------
>>> u, s, v = da.linalg.svd(x) # doctest: +SKIP
Returns
-------
u: Array, unitary / orthogonal
s: Array, singular values in decreasing order (largest first)
v: Array, unitary / orthogonal
See Also
--------
np.linalg.svd : Equivalent NumPy Operation
dask.array.linalg.tsqr: Actual implementation with citation
"""
return tsqr(a, name, compute_svd=True)
| {
"repo_name": "pombredanne/dask",
"path": "dask/array/linalg.py",
"copies": "2",
"size": "10601",
"license": "bsd-3-clause",
"hash": 2106684431642075400,
"line_mean": 31.1242424242,
"line_max": 80,
"alpha_frac": 0.5639090652,
"autogenerated": false,
"ratio": 3.174902665468703,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4738811730668703,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import operator
import numpy as np
from glue.external import six
from glue.core.component_link import BinaryComponentLink
from glue.core.subset import InequalitySubsetState
__all__ = ['ComponentID', 'PixelComponentID', 'ComponentIDDict']
# access to ComponentIDs via .item[name]
class ComponentIDDict(object):
def __init__(self, data, **kwargs):
self.data = data
def __getitem__(self, key):
result = self.data.find_component_id(key)
if result is None:
raise KeyError("ComponentID not found or not unique: %s"
% key)
return result
class ComponentID(object):
"""
References a :class:`glue.core.component.Component` object within a :class:`~glue.core.data.Data` object.
ComponentIDs behave as keys::
component_id = data.id[name]
data[component_id] -> numpy array
"""
def __init__(self, label, hidden=False):
""":param label: Name for the ID
:type label: str"""
self._label = str(label)
self._hidden = hidden
@property
def label(self):
return self._label
@label.setter
def label(self, value):
"""Change label.
.. warning::
Label changes are not currently tracked by client
            classes. Labels should only be changed before creating other
            client objects.
"""
self._label = str(value)
@property
def hidden(self):
"""Whether to hide the component by default"""
return self._hidden
def __str__(self):
return str(self._label)
def __repr__(self):
return str(self._label)
def __eq__(self, other):
if np.issubsctype(type(other), np.number):
return InequalitySubsetState(self, other, operator.eq)
return other is self
# In Python 3, if __eq__ is defined, then __hash__ has to be re-defined
if six.PY3:
__hash__ = object.__hash__
def __ne__(self, other):
if np.issubsctype(type(other), np.number):
return InequalitySubsetState(self, other, operator.ne)
return other is not self
def __gt__(self, other):
return InequalitySubsetState(self, other, operator.gt)
def __ge__(self, other):
return InequalitySubsetState(self, other, operator.ge)
def __lt__(self, other):
return InequalitySubsetState(self, other, operator.lt)
def __le__(self, other):
return InequalitySubsetState(self, other, operator.le)
def __add__(self, other):
return BinaryComponentLink(self, other, operator.add)
def __radd__(self, other):
return BinaryComponentLink(other, self, operator.add)
def __sub__(self, other):
return BinaryComponentLink(self, other, operator.sub)
def __rsub__(self, other):
return BinaryComponentLink(other, self, operator.sub)
def __mul__(self, other):
return BinaryComponentLink(self, other, operator.mul)
def __rmul__(self, other):
return BinaryComponentLink(other, self, operator.mul)
def __div__(self, other):
return BinaryComponentLink(self, other, operator.div)
def __rdiv__(self, other):
return BinaryComponentLink(other, self, operator.div)
def __truediv__(self, other):
return BinaryComponentLink(self, other, operator.truediv)
def __rtruediv__(self, other):
return BinaryComponentLink(other, self, operator.truediv)
def __pow__(self, other):
return BinaryComponentLink(self, other, operator.pow)
def __rpow__(self, other):
return BinaryComponentLink(other, self, operator.pow)
class PixelComponentID(ComponentID):
"""
The ID of a component which is a pixel position in the data - this allows
us to make assumptions in certain places. For example when a polygon
selection is done in pixel space, it can easily be broadcast along
dimensions.
"""
def __init__(self, axis, label, hidden=False):
self.axis = axis
super(PixelComponentID, self).__init__(label, hidden=hidden)
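# Illustrative sketch, not part of the original module: combining or comparing
# ComponentIDs does not compute values; it builds link / subset-state objects
# that are evaluated later. The names below are hypothetical.
def _example_component_id_expressions():
    x = ComponentID('x')
    y = ComponentID('y')
    state = x > 3      # InequalitySubsetState comparing component x with 3
    link = x + y       # BinaryComponentLink combining the two components
    return state, link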
| {
"repo_name": "saimn/glue",
"path": "glue/core/component_id.py",
"copies": "1",
"size": "4185",
"license": "bsd-3-clause",
"hash": -4557751171656810500,
"line_mean": 27.8620689655,
"line_max": 109,
"alpha_frac": 0.6310633214,
"autogenerated": false,
"ratio": 4.039575289575289,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5170638610975289,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import operator
def weight(items, **kwargs):
if not len(kwargs):
raise ValueError('Missing attribute for weighting items!')
scaled = []
for attr, weight in kwargs.items():
values = [float(getattr(item, attr)) for item in items]
try:
s = sum(values)
scaled.append([weight * (v / s) for v in values])
except ZeroDivisionError:
            # s equals zero, so attr won't contribute
scaled.append([0] * len(items))
return map(sum, zip(*scaled))
def ff(items, targets):
"""First-Fit
This is perhaps the simplest packing heuristic;
it simply packs items in the next available bin.
Complexity O(n^2)
"""
bins = [(target, []) for target in targets]
skip = []
for item in items:
for target, content in bins:
if item <= (target - sum(content)):
content.append(item)
break
else:
skip.append(item)
return bins, skip
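# Illustrative sketch, not part of the original module: first-fit places each
# item in the first bin with enough remaining capacity; whatever fits nowhere
# comes back in ``skip``. The numbers are arbitrary demonstration values.
def _example_ff():
    bins, skip = ff([4, 8, 1, 4, 2, 1], targets=[10, 10])
    # bins == [(10, [4, 1, 4, 1]), (10, [8, 2])] and skip == []
    return bins, skip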
def ffd(items, targets, **kwargs):
"""First-Fit Decreasing
This is perhaps the simplest packing heuristic;
it simply packs items in the next available bin.
    This algorithm differs from First-Fit only
    in having a 'sort'; that is, the items are pre-sorted
(largest to smallest).
Complexity O(n^2)
"""
sizes = zip(items, weight(items, **kwargs))
sizes = sorted(sizes, key=operator.itemgetter(1), reverse=True)
items = map(operator.itemgetter(0), sizes)
return ff(items, targets)
def mr(items, targets, **kwargs):
"""Max-Rest
Complexity O(n^2)
"""
bins = [(target, []) for target in targets]
skip = []
for item in items:
capacities = [target - sum(content) for target, content in bins]
weighted = weight(capacities, **kwargs)
(target, content), capacity, _ = max(zip(bins, capacities, weighted),
key=operator.itemgetter(2))
if item <= capacity:
content.append(item)
else:
skip.append(item)
return bins, skip
def mrpq(items, targets):
"""Max-Rest Priority Queue
Complexity O(n*log(n))
"""
raise NotImplementedError()
def bf(items, targets, **kwargs):
"""Best-Fit
Complexity O(n^2)
"""
bins = [(target, []) for target in targets]
skip = []
for item in items:
containers = []
capacities = []
for target, content in bins:
capacity = target - sum(content)
if item <= capacity:
containers.append(content)
capacities.append(capacity - item)
if len(capacities):
weighted = zip(containers, weight(capacities, **kwargs))
content, _ = min(weighted, key=operator.itemgetter(1))
content.append(item)
else:
skip.append(item)
return bins, skip
def bfd(items, targets, **kwargs):
"""Best-Fit Decreasing
Complexity O(n^2)
"""
sizes = zip(items, weight(items, **kwargs))
sizes = sorted(sizes, key=operator.itemgetter(1), reverse=True)
items = map(operator.itemgetter(0), sizes)
return bf(items, targets, **kwargs)
def bfh(items, targets):
"""Best-Fit-Heap
Slightly Improved Complexity
"""
raise NotImplementedError()
| {
"repo_name": "lensacom/satyr",
"path": "mentor/binpack.py",
"copies": "1",
"size": "3414",
"license": "apache-2.0",
"hash": 7031161350202498000,
"line_mean": 24.8636363636,
"line_max": 77,
"alpha_frac": 0.5869947276,
"autogenerated": false,
"ratio": 4.007042253521127,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 132
} |
from __future__ import absolute_import, division, print_function
import os
from bcolz import ctable, carray
import numpy as np
from toolz import keyfilter
import datashape
from datashape import discover
import shutil
from ..append import append
from ..convert import convert, ooc_types
from ..resource import resource
from ..drop import drop
from ..chunks import chunks
keywords = ['cparams', 'dflt', 'expectedlen', 'chunklen', 'rootdir']
@discover.register((ctable, carray))
def discover_bcolz(c, **kwargs):
return datashape.from_numpy(c.shape, c.dtype)
@append.register((ctable, carray), np.ndarray)
def numpy_append_to_bcolz(a, b, **kwargs):
a.append(b)
a.flush()
return a
@append.register((ctable, carray), object)
def object_append_to_bcolz(a, b, **kwargs):
return append(a, convert(chunks(np.ndarray), b, **kwargs), **kwargs)
@convert.register(ctable, np.ndarray, cost=2.0)
def convert_numpy_to_bcolz_ctable(x, **kwargs):
return ctable(x, **keyfilter(keywords.__contains__, kwargs))
@convert.register(carray, np.ndarray, cost=2.0)
def convert_numpy_to_bcolz_carray(x, **kwargs):
return carray(x, **keyfilter(keywords.__contains__, kwargs))
@convert.register(np.ndarray, (carray, ctable), cost=1.0)
def convert_bcolz_to_numpy(x, **kwargs):
return x[:]
@append.register((carray, ctable), chunks(np.ndarray))
def append_carray_with_chunks(a, c, **kwargs):
for chunk in c:
append(a, chunk)
a.flush()
return a
@convert.register(chunks(np.ndarray), (ctable, carray), cost=1.2)
def bcolz_to_numpy_chunks(x, chunksize=2**20, **kwargs):
def load():
first_n = min(1000, chunksize)
first = x[:first_n]
yield first
for i in range(first_n, x.shape[0], chunksize):
yield x[i: i + chunksize]
return chunks(np.ndarray)(load)
@resource.register(r'.*\.bcolz/?')
def resource_bcolz(uri, dshape=None, expected_dshape=None, **kwargs):
if os.path.exists(uri):
try:
return ctable(rootdir=uri)
except IOError: # __rootdirs__ doesn't exist because we aren't a ctable
return carray(rootdir=uri)
else:
if not dshape:
raise ValueError("Must specify either existing bcolz directory or"
" valid datashape")
dshape = datashape.dshape(dshape)
dt = datashape.to_numpy_dtype(dshape)
shape_tail = tuple(map(int, dshape.shape[1:])) # tail of shape
if dshape.shape[0] == datashape.var:
shape = (0,) + shape_tail
else:
shape = (int(dshape.shape[0]),) + shape_tail
x = np.empty(shape=shape, dtype=dt)
kwargs = keyfilter(keywords.__contains__, kwargs)
expectedlen = kwargs.pop('expectedlen',
int(expected_dshape[0])
if expected_dshape is not None and
isinstance(expected_dshape[0], datashape.Fixed)
else None)
if datashape.predicates.isrecord(dshape.measure):
return ctable(x, rootdir=uri, expectedlen=expectedlen, **kwargs)
else:
return carray(x, rootdir=uri, expectedlen=expectedlen, **kwargs)
@drop.register((carray, ctable))
def drop_bcolz(b, **kwargs):
shutil.rmtree(b.rootdir)
ooc_types |= set((carray, ctable))
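# Illustrative sketch, not part of the original module: round-tripping a NumPy
# array through an in-memory bcolz carray using the converters defined above.
# The helper name is hypothetical.
def _example_bcolz_roundtrip():
    x = np.arange(10)
    c = convert_numpy_to_bcolz_carray(x)            # numpy -> bcolz carray
    assert discover_bcolz(c) == datashape.from_numpy(c.shape, c.dtype)
    y = convert_bcolz_to_numpy(c)                   # bcolz carray -> numpy
    assert (x == y).all()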
| {
"repo_name": "mrocklin/into",
"path": "into/backends/bcolz.py",
"copies": "1",
"size": "3377",
"license": "bsd-3-clause",
"hash": 8320109676157865000,
"line_mean": 29.7,
"line_max": 80,
"alpha_frac": 0.6295528576,
"autogenerated": false,
"ratio": 3.3435643564356434,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.44731172140356434,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import os
from collections import defaultdict, Counter
from qtpy import QtWidgets, QtGui, QtCore
from qtpy.QtCore import Qt
from glue.core import ComponentID
from glue.core.parse import ParsedComponentLink, ParsedCommand
from glue.utils.qt import load_ui
from glue.core.message import NumericalDataChangedMessage
from glue.dialogs.component_manager.qt.equation_editor import EquationEditorDialog
__all__ = ['ComponentManagerWidget']
class ComponentTreeWidget(QtWidgets.QTreeWidget):
order_changed = QtCore.Signal()
def select_cid(self, cid):
for item in self:
if item.data(0, Qt.UserRole) is cid:
self.select_item(item)
return
raise ValueError("Could not find find cid {0} in list".format(cid))
def select_item(self, item):
self.selection = self.selectionModel()
self.selection.select(QtCore.QItemSelection(self.indexFromItem(item, 0),
self.indexFromItem(item, self.columnCount() - 1)),
QtCore.QItemSelectionModel.ClearAndSelect)
@property
def selected_item(self):
items = self.selectedItems()
return items[0] if len(items) == 1 else None
@property
def selected_cid(self):
selected = self.selected_item
return None if selected is None else selected.data(0, Qt.UserRole)
def add_cid_and_label(self, cid, label):
item = QtWidgets.QTreeWidgetItem(self.invisibleRootItem(), [label])
item.setData(0, Qt.UserRole, cid)
item.setFlags(item.flags() | Qt.ItemIsEditable)
item.setFlags(item.flags() ^ Qt.ItemIsDropEnabled)
def __iter__(self):
root = self.invisibleRootItem()
for idx in range(root.childCount()):
yield root.child(idx)
def __len__(self):
return self.invisibleRootItem().childCount()
def dropEvent(self, event):
selected = self.selected_item
super(ComponentTreeWidget, self).dropEvent(event)
self.select_item(selected)
self.order_changed.emit()
def mousePressEvent(self, event):
self.clearSelection()
super(ComponentTreeWidget, self).mousePressEvent(event)
class ComponentManagerWidget(QtWidgets.QDialog):
def __init__(self, data_collection=None, parent=None):
super(ComponentManagerWidget, self).__init__(parent=parent)
self.ui = load_ui('component_manager.ui', self,
directory=os.path.dirname(__file__))
self.list = {}
self.list['main'] = self.ui.list_main_components
self.list['derived'] = self.ui.list_derived_components
self.data_collection = data_collection
self._components = defaultdict(lambda: defaultdict(list))
self._state = defaultdict(dict)
for data in data_collection:
for cid in data.primary_components:
if not cid.hidden:
comp_state = {}
comp_state['cid'] = cid
comp_state['label'] = cid.label
self._state[data][cid] = comp_state
self._components[data]['main'].append(cid)
self._components[data]['derived'] = []
for cid in data.derived_components:
comp = data.get_component(cid)
if isinstance(comp.link, ParsedComponentLink):
comp_state = {}
comp_state['cid'] = cid
comp_state['label'] = cid.label
comp_state['equation'] = comp.link._parsed
self._state[data][cid] = comp_state
self._components[data]['derived'].append(cid)
# Populate data combo
for data in self.data_collection:
self.ui.combosel_data.addItem(data.label, userData=data)
self.ui.combosel_data.setCurrentIndex(0)
self.ui.combosel_data.currentIndexChanged.connect(self._update_component_lists)
self._update_component_lists()
self.ui.button_remove_main.clicked.connect(self._remove_main_component)
self.ui.button_add_derived.clicked.connect(self._add_derived_component)
self.ui.button_edit_derived.clicked.connect(self._edit_derived_component)
self.ui.button_remove_derived.clicked.connect(self._remove_derived_component)
self.ui.list_main_components.itemSelectionChanged.connect(self._update_selection_main)
self.ui.list_derived_components.itemSelectionChanged.connect(self._update_selection_derived)
self._update_selection_main()
self._update_selection_derived()
self.ui.list_main_components.itemChanged.connect(self._update_state)
self.ui.list_derived_components.itemChanged.connect(self._update_state)
self.ui.list_main_components.order_changed.connect(self._update_state)
self.ui.list_derived_components.order_changed.connect(self._update_state)
self.ui.button_ok.clicked.connect(self.accept)
self.ui.button_cancel.clicked.connect(self.reject)
def _update_selection_main(self):
enabled = self.list['main'].selected_cid is not None
self.button_remove_main.setEnabled(enabled)
def _update_selection_derived(self):
enabled = self.list['derived'].selected_cid is not None
self.button_edit_derived.setEnabled(enabled)
self.button_remove_derived.setEnabled(enabled)
@property
def data(self):
try:
return self.ui.combosel_data.currentData()
except AttributeError: # PyQt4
return self.ui.combosel_data.itemData(self.ui.combosel_data.currentIndex())
def _update_component_lists(self, *args):
# This gets called when the data is changed and we need to update the
# components shown in the lists.
for component_list in ('main', 'derived'):
self.list[component_list].blockSignals(True)
self.list[component_list].clear()
for cid in self._components[self.data][component_list]:
self.list[component_list].add_cid_and_label(cid, self._state[self.data][cid]['label'])
self.list[component_list].blockSignals(False)
self._validate()
def _validate(self):
# Construct a list of all labels for the current dataset so that
# we can check which ones are duplicates
labels = [c['label'] for c in self._state[self.data].values()]
if len(labels) == 0:
return
label_count = Counter(labels)
if label_count.most_common(1)[0][1] > 1:
# If we are here, there are duplicates somewhere in the list
# of components.
brush_red = QtGui.QBrush(Qt.red)
brush_black = QtGui.QBrush(Qt.black)
for component_list in ('main', 'derived'):
self.list[component_list].blockSignals(True)
for item in self.list[component_list]:
label = item.text(0)
if label_count[label] > 1:
item.setForeground(0, brush_red)
else:
item.setForeground(0, brush_black)
self.list[component_list].blockSignals(False)
self.ui.label_status.setStyleSheet('color: red')
self.ui.label_status.setText('Error: some components have duplicate names')
self.ui.button_ok.setEnabled(False)
self.ui.combosel_data.setEnabled(False)
else:
self.ui.label_status.setStyleSheet('')
self.ui.label_status.setText('')
self.ui.button_ok.setEnabled(True)
self.ui.combosel_data.setEnabled(True)
def _update_state(self, *args):
for component_list in ('main', 'derived'):
self._components[self.data][component_list] = []
for item in self.list[component_list]:
cid = item.data(0, Qt.UserRole)
self._state[self.data][cid]['label'] = item.text(0)
self._components[self.data][component_list].append(cid)
self._update_component_lists()
def _remove_main_component(self, *args):
cid = self.list['main'].selected_cid
if cid is not None:
self._components[self.data]['main'].remove(cid)
self._state[self.data].pop(cid)
self._update_component_lists()
def _remove_derived_component(self, *args):
cid = self.list['derived'].selected_cid
if cid is not None:
self._components[self.data]['derived'].remove(cid)
self._state[self.data].pop(cid)
self._update_component_lists()
def _add_derived_component(self, *args):
comp_state = {}
comp_state['cid'] = ComponentID('')
comp_state['label'] = 'New component'
comp_state['equation'] = None
self._components[self.data]['derived'].append(comp_state['cid'])
self._state[self.data][comp_state['cid']] = comp_state
self._update_component_lists()
self.list['derived'].select_cid(comp_state['cid'])
self._edit_derived_component()
def _edit_derived_component(self, event=None):
mapping = {}
references = {}
for cid in self._components[self.data]['main']:
label = self._state[self.data][cid]['label']
mapping[cid] = label
references[label] = cid
cid = self.list['derived'].selected_cid
if self._state[self.data][cid]['equation'] is None:
equation = None
else:
equation = self._state[self.data][cid]['equation'].render(mapping)
dialog = EquationEditorDialog(equation=equation, references=references, parent=self)
dialog.setWindowFlags(self.windowFlags() | Qt.Window)
dialog.setFocus()
dialog.raise_()
dialog.exec_()
if dialog.final_expression is None:
return
self._state[self.data][cid]['equation'] = dialog._get_parsed_command()
def accept(self):
for data in self._components:
cids_main = self._components[data]['main']
cids_derived = self._components[data]['derived']
# First deal with renaming of components
for cid_new in cids_main + cids_derived:
label = self._state[data][cid_new]['label']
if label != cid_new.label:
cid_new.label = label
cids_all = data.pixel_component_ids + data.world_component_ids + cids_main + cids_derived
cids_existing = data.components
for cid_old in cids_existing:
if not any(cid_old is cid_new for cid_new in cids_all):
data.remove_component(cid_old)
components = dict((cid.uuid, cid) for cid in data.components)
for cid_new in cids_derived:
if any(cid_new is cid_old for cid_old in cids_existing):
comp = data.get_component(cid_new)
if comp.link._parsed._cmd != self._state[data][cid_new]['equation']._cmd:
comp.link._parsed._cmd = self._state[data][cid_new]['equation']._cmd
comp.link._parsed._references = components
if data.hub:
msg = NumericalDataChangedMessage(data)
data.hub.broadcast(msg)
else:
pc = ParsedCommand(self._state[data][cid_new]['equation']._cmd, components)
link = ParsedComponentLink(cid_new, pc)
data.add_component_link(link)
data.reorder_components(cids_all)
super(ComponentManagerWidget, self).accept()
if __name__ == "__main__": # pragma: nocover
from glue.utils.qt import get_qapp
app = get_qapp()
import numpy as np
from glue.core.data import Data
from glue.core.data_collection import DataCollection
x = np.random.random((5, 5))
y = x * 3
dc = DataCollection()
dc.append(Data(label='test1', x=x, y=y))
dc.append(Data(label='test2', a=x, b=y))
widget = ComponentManagerWidget(dc)
widget.exec_()
| {
"repo_name": "stscieisenhamer/glue",
"path": "glue/dialogs/component_manager/qt/component_manager.py",
"copies": "1",
"size": "12326",
"license": "bsd-3-clause",
"hash": 5105669272021516000,
"line_mean": 35.0409356725,
"line_max": 102,
"alpha_frac": 0.6010871329,
"autogenerated": false,
"ratio": 4.030739045127534,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0005459414497715842,
"num_lines": 342
} |
from __future__ import absolute_import, division, print_function
import os
from collections import defaultdict
from typing import Dict, Text
_FIGURE_LIST = defaultdict(dict)
class Visualizer(object):
""" Visualizer """
def assert_figure(self, fig):
from matplotlib import pyplot as plt
assert isinstance(fig, plt.Figure), \
'fig must be instance of matplotlib.Figure, but given: %s' % str(type(fig))
return fig
def assert_axis(self, ax):
from matplotlib import pyplot as plt
from odin.visual.figures import to_axis
ax = to_axis(ax)
assert isinstance(ax, plt.Axes), \
'ax must be instance of matplotlib.Axes, but given: %s' % str(type(ax))
return ax
def get_figures(self) -> Dict[Text, 'Figure']:
return _FIGURE_LIST[id(self)]
def add_figure(self, name, fig):
from matplotlib import pyplot as plt
self.assert_figure(fig)
_FIGURE_LIST[id(self)][name] = fig
return self
def save_figures(self,
path='/tmp/tmp.pdf',
dpi=None,
separate_files=True,
clear_figures=True,
verbose=False):
from odin.utils import ctext
from matplotlib import pyplot as plt
# checking arguments
if os.path.isfile(path) or '.pdf' == path[-4:].lower():
separate_files = False
assert '.pdf' == path[-4:].lower(), \
"If a file is given, it must be PDF file"
figures = _FIGURE_LIST[id(self)]
n_figures = len(figures)
if n_figures == 0:
return self
# ====== saving PDF file ====== #
if verbose:
print("Saving %s figures to path: " % ctext(n_figures, 'lightcyan'),
ctext(path, 'lightyellow'))
if not separate_files:
if dpi is None:
dpi = 48
if '.pdf' not in path:
path = path + '.pdf'
from matplotlib.backends.backend_pdf import PdfPages
pp = PdfPages(path)
for key, fig in figures.items():
try:
fig.savefig(pp, dpi=dpi, format='pdf', bbox_inches="tight")
if verbose:
print(" - Saved '%s' to pdf file" % ctext(key, 'cyan'))
except Exception as e:
if verbose:
print(" - Error '%s'" % ctext(key, 'cyan'))
print(" ", e)
pp.close()
# ====== saving PNG file ====== #
else:
if dpi is None:
dpi = 160
if not os.path.exists(path):
os.mkdir(path)
assert os.path.isdir(path), "'%s' must be path to a folder" % path
kwargs = dict(dpi=dpi, bbox_inches="tight")
for key, fig in figures.items():
out_path = os.path.join(path, key + '.png')
try:
fig.savefig(out_path, **kwargs)
if verbose:
print(" - Saved '%s' to %s" %
(ctext(key, 'cyan'), ctext(out_path, 'yellow')))
except Exception as e:
if verbose:
print(" - Error '%s'" % ctext(key, 'cyan'))
print(" ", e)
# ====== clear figures ====== #
if clear_figures:
for fig in figures.values():
plt.close(fig)
figures.clear()
return self
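# Illustrative sketch, not part of the original module: a minimal Visualizer
# subclass that registers one matplotlib figure and writes it out. The class
# name, figure name and output path are assumptions for demonstration.
def _example_visualizer(path='/tmp/demo.pdf'):
    from matplotlib import pyplot as plt

    class Demo(Visualizer):
        pass

    viz = Demo()
    fig = plt.figure()
    fig.gca().plot([0, 1], [0, 1])
    viz.add_figure('line', fig)
    # A single .pdf path collects all registered figures into one file;
    # a directory path would instead write one PNG per figure.
    return viz.save_figures(path=path, clear_figures=True)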
| {
"repo_name": "imito/odin",
"path": "odin/visual/base.py",
"copies": "1",
"size": "3121",
"license": "mit",
"hash": -7766540330473742000,
"line_mean": 30.5252525253,
"line_max": 79,
"alpha_frac": 0.5616789491,
"autogenerated": false,
"ratio": 3.599769319492503,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4661448268592503,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import os
from collections import deque, OrderedDict
from qtpy import QtWidgets, QtCore
from qtpy.QtCore import Qt
from glue.core.parse import InvalidTagError, ParsedCommand, TAG_RE
from glue.utils.qt import load_ui, CompletionTextEdit
__all__ = ['EquationEditorDialog']
class ColorizedCompletionTextEdit(CompletionTextEdit):
updated = QtCore.Signal()
def __init__(self, *args, **kwargs):
super(ColorizedCompletionTextEdit, self).__init__(*args, **kwargs)
self.setAlignment(Qt.AlignLeft)
self.setUndoRedoEnabled(False)
self._undo_stack = deque(maxlen=100)
self._redo_stack = deque(maxlen=100)
def insertPlainText(self, *args):
super(ColorizedCompletionTextEdit, self).insertPlainText(*args)
self.reformat_text()
self.updated.emit()
self.setAlignment(Qt.AlignLeft)
def keyReleaseEvent(self, event):
super(ColorizedCompletionTextEdit, self).keyReleaseEvent(event)
self.reformat_text()
self.updated.emit()
def keyPressEvent(self, event):
super(ColorizedCompletionTextEdit, self).keyPressEvent(event)
# NOTE: We use == here instead of & for the modifiers because we don't
# want to catch e.g. control-shift-z or other combinations.
if event.modifiers() == Qt.ControlModifier and event.key() == Qt.Key_Z:
if len(self._undo_stack) > 1:
self._undo_stack.pop()
self.setHtml(self._undo_stack[-1])
text = self.toPlainText()
tc = self.textCursor()
tc.setPosition(len(text))
self.setTextCursor(tc)
self._cache = self._undo_stack[-1]
self.updated.emit()
def reformat_text(self):
# If the text hasn't changed, no need to reformat
if self.toPlainText() == getattr(self, '_cache', None):
return
# Here every time a key is released, we re-colorize the expression.
# We show valid components in blue, and invalid ones in red. We
        # recognize components because they contain a ":" which is not valid
# Python syntax (except if one considers lambda functions, but we can
# probably ignore that here)
text = self.toPlainText()
def format_components(m):
component = m.group(0)
if component in self.word_list:
return "<font color='#0072B2'><b>" + component + "</b></font>"
else:
return "<font color='#D55E00'><b>" + component + "</b></font>"
html = TAG_RE.sub(format_components, text)
tc = self.textCursor()
pos = tc.position()
self._undo_stack.append(html)
self.setHtml(html)
# Sometimes the HTML gets rid of double spaces so we have to make
# sure the position isn't greater than the text length.
text = self.toPlainText()
pos = min(pos, len(text))
tc.setPosition(pos)
self.setTextCursor(tc)
self._cache = self.toPlainText()
class EquationEditorDialog(QtWidgets.QDialog):
def __init__(self, data=None, equation=None, references=None, parent=None):
super(EquationEditorDialog, self).__init__(parent=parent)
self.ui = load_ui('equation_editor.ui', self,
directory=os.path.dirname(__file__))
self.equation = equation
# Get mapping from label to component ID
if references is not None:
self.references = references
elif data is not None:
self.references = OrderedDict()
for cid in data.primary_components:
self.references[cid.label] = cid
# Populate component combo
for label, cid in self.references.items():
self.ui.combosel_component.addItem(label, userData=cid)
# Set up labels for auto-completion
labels = ['{' + label + '}' for label in self.references]
self.ui.expression.set_word_list(labels)
self.ui.expression.insertPlainText(equation)
self.ui.button_ok.clicked.connect(self.accept)
self.ui.button_cancel.clicked.connect(self.reject)
self.ui.button_insert.clicked.connect(self._insert_component)
self.ui.expression.updated.connect(self._update_status)
self._update_status()
def _insert_component(self):
label = self.ui.combosel_component.currentText()
self.expression.insertPlainText('{' + label + '}')
def _update_status(self):
# If the text hasn't changed, no need to check again
if hasattr(self, '_cache') and self._get_raw_command() == self._cache:
return
if self._get_raw_command() == "":
self.ui.label_status.setText("")
self.ui.button_ok.setEnabled(False)
else:
try:
pc = self._get_parsed_command()
pc.evaluate_test()
except SyntaxError:
self.ui.label_status.setStyleSheet('color: red')
self.ui.label_status.setText("Incomplete or invalid syntax")
self.ui.button_ok.setEnabled(False)
except InvalidTagError as exc:
self.ui.label_status.setStyleSheet('color: red')
self.ui.label_status.setText("Invalid component: {0}".format(exc.tag))
self.ui.button_ok.setEnabled(False)
except Exception as exc:
self.ui.label_status.setStyleSheet('color: red')
self.ui.label_status.setText(str(exc))
self.ui.button_ok.setEnabled(False)
else:
self.ui.label_status.setStyleSheet('color: green')
self.ui.label_status.setText("Valid expression")
self.ui.button_ok.setEnabled(True)
self._cache = self._get_raw_command()
def _get_raw_command(self):
return str(self.ui.expression.toPlainText())
def _get_parsed_command(self):
expression = self._get_raw_command()
return ParsedCommand(expression, self.references)
def accept(self):
self.final_expression = self._get_parsed_command()._cmd
super(EquationEditorDialog, self).accept()
def reject(self):
self.final_expression = None
super(EquationEditorDialog, self).reject()
if __name__ == "__main__": # pragma: nocover
from glue.utils.qt import get_qapp
app = get_qapp()
from glue.core.data import Data
d = Data(label='test1', x=[1, 2, 3], y=[2, 3, 4], z=[3, 4, 5])
widget = EquationEditorDialog(d, '')
widget.exec_()
| {
"repo_name": "stscieisenhamer/glue",
"path": "glue/dialogs/component_manager/qt/equation_editor.py",
"copies": "1",
"size": "6696",
"license": "bsd-3-clause",
"hash": -8878158159281217000,
"line_mean": 34.4285714286,
"line_max": 86,
"alpha_frac": 0.6087216249,
"autogenerated": false,
"ratio": 3.9904648390941597,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.509918646399416,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import os
from collections import OrderedDict
import numpy as np
from astropy.io import fits
from glue.config import subset_mask_importer, subset_mask_exporter
from glue.core.data_factories.fits import is_fits
@subset_mask_importer(label='FITS', extension=['fits', 'fit',
'fits.gz', 'fit.gz'])
def fits_subset_mask_importer(filename):
if not is_fits(filename):
raise IOError("File {0} is not a valid FITS file".format(filename))
masks = OrderedDict()
label = os.path.basename(filename).rpartition('.')[0]
with fits.open(filename) as hdulist:
for ihdu, hdu in enumerate(hdulist):
if hdu.data is not None and hdu.data.dtype.kind == 'i':
if not hdu.name:
name = '{0}[{1}]'.format(label, ihdu)
elif ihdu == 0:
name = label
else:
name = hdu.name
masks[name] = hdu.data > 0
if len(masks) == 0:
raise ValueError('No HDUs with integer values (which would normally indicate a mask) were found in file')
return masks
@subset_mask_exporter(label='FITS', extension=['fits', 'fit',
'fits.gz', 'fit.gz'])
def fits_subset_mask_exporter(filename, masks):
hdulist = fits.HDUList()
hdulist.append(fits.PrimaryHDU())
# We store the subset masks in the extensions to make sure we can give
    # them a name.
for label, mask in masks.items():
hdulist.append(fits.ImageHDU(np.asarray(mask, int), name=label))
hdulist.writeto(filename, overwrite=True)
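# Illustrative sketch, not part of the original module: writing two boolean
# masks and reading them back with the importer above. The file name, mask
# names and values are hypothetical.
def _example_subset_mask_roundtrip(filename='masks.fits'):
    masks = OrderedDict([('bright', np.array([[True, False], [False, True]])),
                         ('faint', np.array([[False, True], [True, False]]))])
    fits_subset_mask_exporter(filename, masks)
    loaded = fits_subset_mask_importer(filename)
    # Each integer-valued HDU comes back as a boolean mask keyed by its name.
    for mask in loaded.values():
        assert mask.dtype == bool
    return loaded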
| {
"repo_name": "stscieisenhamer/glue",
"path": "glue/io/formats/fits/subset_mask.py",
"copies": "3",
"size": "1716",
"license": "bsd-3-clause",
"hash": -2173280280907220700,
"line_mean": 31.3773584906,
"line_max": 113,
"alpha_frac": 0.6002331002,
"autogenerated": false,
"ratio": 3.821826280623608,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5922059380823608,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import os
from contextlib import contextmanager
from qtpy import QtCore, QtWidgets
from qtpy.QtCore import Qt
from qtpy.uic import loadUi
from glue.utils.qt import get_text
__all__ = ['update_combobox', 'GlueTabBar', 'load_ui', 'process_dialog', 'combo_as_string']
def update_combobox(combo, labeldata, default_index=0):
"""
Redefine the items in a QComboBox
Parameters
----------
    combo : QComboBox
The widget to update
labeldata : sequence of N (label, data) tuples
The combobox will contain N items with the appropriate
labels, and data set as the userData
Returns
-------
combo : QComboBox
The updated input
Notes
-----
If the current userData in the combo box matches
any of labeldata, that selection will be retained.
Otherwise, the first item will be selected.
Signals are disabled while the combo box is updated
    The QComboBox is modified in place.
"""
combo.blockSignals(True)
idx = combo.currentIndex()
if idx >= 0:
current = combo.itemData(idx)
else:
current = None
combo.clear()
index = None
for i, (label, data) in enumerate(labeldata):
combo.addItem(label, userData=data)
if data is current or data == current:
index = i
if default_index < 0:
default_index = combo.count() + default_index
if index is None:
index = min(default_index, combo.count() - 1)
combo.setCurrentIndex(index)
combo.blockSignals(False)
# We need to force emit this, otherwise if the index happens to be the
# same as before, even if the data is different, callbacks won't be
# called. So we block the signals until just before now then always call
# callback manually.
combo.currentIndexChanged.emit(index)
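# Illustrative sketch, not part of the original module: repopulating a combo
# box while preserving the selection when its userData is still present. The
# labels and data values are hypothetical; a QApplication must exist.
def _example_update_combobox(combo):
    update_combobox(combo, [('First', 1), ('Second', 2), ('Third', 3)])
    combo.setCurrentIndex(1)                      # select ('Second', 2)
    update_combobox(combo, [('Second', 2), ('Third', 3)])
    assert combo.currentIndex() == 0              # userData 2 was retained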
class GlueTabBar(QtWidgets.QTabBar):
def __init__(self, *args, **kwargs):
super(GlueTabBar, self).__init__(*args, **kwargs)
def choose_rename_tab(self, index=None):
"""
Prompt user to rename a tab
Parameters
----------
index : int
Index of tab to edit. Defaults to current index
"""
        if index is None:
            index = self.currentIndex()
label = get_text("New Tab Label")
if not label:
return
self.rename_tab(index, label)
def rename_tab(self, index, label):
"""
Updates the name used for given tab
Parameters
----------
index : int
            Index of tab to edit
label : str
New label to use for this tab
"""
self.setTabText(index, label)
def mouseDoubleClickEvent(self, event):
if event.button() != Qt.LeftButton:
return
index = self.tabAt(event.pos())
if index >= 0:
self.choose_rename_tab(index)
def load_ui(path, parent=None, directory=None):
"""
Load a .ui file
Parameters
----------
path : str
Name of ui file to load
    parent : QObject
        Object to use as the parent of this widget
    directory : str, optional
        Directory in which to look for the ``.ui`` file; if not given,
        ``path`` is resolved relative to the current working directory
Returns
-------
w : QtWidgets.QWidget
The new widget
"""
if directory is not None:
full_path = os.path.join(directory, path)
else:
full_path = os.path.abspath(path)
if not os.path.exists(full_path) and 'site-packages.zip' in full_path:
# Workaround for Mac app
        full_path = full_path.replace('site-packages.zip', 'glue')
return loadUi(full_path, parent)
@contextmanager
def process_dialog(delay=0, accept=False, reject=False, function=None):
"""
Context manager to automatically capture the active dialog and carry out
certain actions.
Note that only one of ``accept``, ``reject``, or ``function`` should be
specified.
Parameters
----------
delay : int, optional
The delay in ms before acting on the dialog (since it may not yet exist
when the context manager is called).
accept : bool, optional
If `True`, accept the dialog after the specified delay.
reject : bool, optional
        If `True`, reject the dialog after the specified delay.
function : func, optional
For more complex user actions, specify a function that takes the dialog
as the first and only argument.
"""
def _accept(dialog):
dialog.accept()
def _reject(dialog):
dialog.reject()
n_args = sum((accept, reject, function is not None))
if n_args > 1:
raise ValueError("Only one of ``accept``, ``reject``, or "
"``function`` should be specified")
elif n_args == 0:
raise ValueError("One of ``accept``, ``reject``, or "
"``function`` should be specified")
if accept:
function = _accept
elif reject:
function = _reject
def wrapper():
from glue.utils.qt import get_qapp
app = get_qapp()
# Make sure that any window/dialog that needs to be shown is shown
app.processEvents()
dialog = app.activeWindow()
function(dialog)
timer = QtCore.QTimer()
timer.setInterval(delay)
timer.setSingleShot(True)
timer.timeout.connect(wrapper)
timer.start()
yield
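# Illustrative sketch (not part of the original module): typical test usage of
# process_dialog, automatically accepting a modal dialog that would otherwise
# block. Assumes a running QApplication; the message box is hypothetical.
def _example_process_dialog():
    with process_dialog(delay=500, accept=True):
        QtWidgets.QMessageBox.information(None, "Title", "This box is auto-accepted")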
def combo_as_string(combo):
"""
Return the text labels of a combo box as a string to make it easier to
check the content of a combo box in tests.
"""
items = [combo.itemText(i) for i in range(combo.count())]
return ":".join(items)
| {
"repo_name": "stscieisenhamer/glue",
"path": "glue/utils/qt/helpers.py",
"copies": "1",
"size": "5647",
"license": "bsd-3-clause",
"hash": -47789446397426570,
"line_mean": 25.8904761905,
"line_max": 91,
"alpha_frac": 0.6116522047,
"autogenerated": false,
"ratio": 4.1829629629629625,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5294615167662963,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import os
from dynd import nd
import datashape
import sys
from functools import partial
from datashape import dshape, Record, to_numpy_dtype, Option
from datashape.predicates import isscalar
import toolz
from toolz import concat, partition_all, first, merge
from cytoolz import pluck
import copy
from datetime import datetime
from numbers import Number
from collections import Iterable, Iterator
import numpy as np
import pandas as pd
import tables as tb
from ..compute.chunks import ChunkIterator, chunks
from ..data.meta import Concat
from ..dispatch import dispatch
from .. import expr
from ..expr import Expr, Projection, Field, Symbol
from ..compute.core import compute
from ..resource import resource
from ..compatibility import _strtypes, map
from ..utils import keywords
from ..data.utils import sort_dtype_items
from ..pytables import PyTables
from ..compute.spark import RDD
__all__ = ['into', 'discover']
@dispatch(object, object)
def into(a, b, **kwargs):
"""
Push data in ``b`` into a container of type ``a``
Examples
--------
>>> into([], (1, 2, 3))
[1, 2, 3]
>>> into(np.ndarray, [['Alice', 100], ['Bob', 200]], names=['name', 'amt'])
rec.array([('Alice', 100), ('Bob', 200)],
dtype=[('name', 'S5'), ('amt', '<i8')])
>>> into(pd.DataFrame, _)
name amt
0 Alice 100
1 Bob 200
"""
raise NotImplementedError(
"Blaze does not know a rule for the following conversion"
"\n%s <- %s" % (type(a).__name__, type(b).__name__))
# Optional imports
try:
from bokeh.objects import ColumnDataSource
except ImportError:
ColumnDataSource = type(None)
try:
import bcolz
from bcolz import ctable, carray
except ImportError:
ctable = type(None)
carray = type(None)
try:
from pymongo.collection import Collection
except ImportError:
Collection = type(None)
try:
from ..data import DataDescriptor, CSV, JSON, JSON_Streaming, Excel, SQL
except ImportError:
DataDescriptor = type(None)
CSV = type(None)
JSON = type(None)
    JSON_Streaming = type(None)
    Excel = type(None)
    SQL = type(None)
@dispatch(type, object)
def into(a, b, **kwargs):
"""
Resolve into when given a type as a first argument
Usually we give into an example of the thing that we want
>>> into([], (1, 2, 3)) # give me a list like []
[1, 2, 3]
However sometimes it's inconvenient to construct a dummy example.
In those cases we just specify the desired type
>>> into(list, (1, 2, 3))
[1, 2, 3]
"""
f = into.dispatch(a, type(b))
try:
a = a()
except:
pass
return f(a, b, **kwargs)
@dispatch((list, tuple, set), (list, tuple, set, Iterator,
type(dict().items()),
pd.Series, np.record, np.void))
def into(a, b, **kwargs):
return type(a)(b)
@dispatch(set, list)
def into(a, b, **kwargs):
try:
return set(b)
except TypeError:
return set(map(tuple, b))
@dispatch(dict, (list, tuple, set))
def into(a, b, **kwargs):
return dict(b)
@dispatch((list, tuple, set), dict)
def into(a, b, **kwargs):
return type(a)(map(type(a), sorted(b.items(), key=lambda x: x[0])))
@dispatch(nd.array, (Iterable, Number) + _strtypes)
def into(a, b, **kwargs):
return nd.array(b, **kwargs)
@dispatch(nd.array, nd.array)
def into(a, b, **kwargs):
return b
@dispatch(np.ndarray, np.ndarray)
def into(a, b, **kwargs):
return b
@dispatch(list, nd.array)
def into(a, b, **kwargs):
return nd.as_py(b, tuple=True)
@dispatch(tuple, nd.array)
def into(a, b, **kwargs):
return tuple(nd.as_py(b, tuple=True))
@dispatch(np.ndarray, nd.array)
def into(a, b, **kwargs):
return nd.as_numpy(b, allow_copy=True)
def dtype_from_tuple(t):
dshape = discover(t)
names = ['f%d' % i for i in range(len(t))]
types = [x.measure.to_numpy_dtype() for x in dshape.measure.dshapes]
return np.dtype(list(zip(names, types)))
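# Illustrative sketch (not part of the original module): dtype_from_tuple
# builds a structured dtype from a sample row, naming the fields f0, f1, ...
def _example_dtype_from_tuple():
    dt = dtype_from_tuple((1, 2.0))
    assert dt.names == ('f0', 'f1')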
@dispatch(np.ndarray, (Iterable, Iterator))
def into(a, b, **kwargs):
b = iter(b)
first = next(b)
b = toolz.concat([[first], b])
if isinstance(first, datetime):
b = map(np.datetime64, b)
if isinstance(first, (list, tuple)):
return np.rec.fromrecords([tuple(x) for x in b],
dtype=kwargs.pop('dtype',
dtype_from_tuple(first)),
**kwargs)
elif hasattr(first, 'values'):
        # Detect sqlalchemy.engine.result.RowProxy types and similar
return np.asarray([tuple(x.values()) for x in b], **kwargs)
else:
return np.asarray(list(b), **kwargs)
def degrade_numpy_dtype_to_python(dt):
"""
>>> degrade_numpy_dtype_to_python(np.dtype('M8[ns]'))
dtype('<M8[us]')
>>> dt = np.dtype([('a', 'S7'), ('b', 'M8[D]'), ('c', 'M8[ns]')])
>>> degrade_numpy_dtype_to_python(dt)
dtype([('a', 'S7'), ('b', '<M8[D]'), ('c', '<M8[us]')])
"""
replacements = {'M8[ns]': np.dtype('M8[us]'),
'M8[as]': np.dtype('M8[us]')}
dt = replacements.get(dt.str.lstrip('<>'), dt)
if str(dt)[0] == '[':
return np.dtype([(name, degrade_numpy_dtype_to_python(dt[name]))
for name in dt.names])
return dt
@dispatch(list, np.ndarray)
def into(a, b, **kwargs):
if 'M8' in str(b.dtype) or 'datetime' in str(b.dtype):
b = b.astype(degrade_numpy_dtype_to_python(b.dtype))
return numpy_ensure_strings(b).tolist()
@dispatch(set, object)
def into(a, b, **kwargs):
return set(into(list, b, **kwargs))
@dispatch(pd.DataFrame, np.ndarray)
def into(df, x, **kwargs):
if len(df.columns) > 0:
columns = list(df.columns)
else:
columns = list(x.dtype.names)
return pd.DataFrame(numpy_ensure_strings(x), columns=columns)
@dispatch((pd.DataFrame, list, tuple, Iterator, nd.array), tb.Table)
def into(a, t, **kwargs):
x = into(np.ndarray, t)
return into(a, x, **kwargs)
@dispatch(np.ndarray, tb.Table)
def into(_, t, **kwargs):
res = t[:]
dt_fields = [k for k, v in t.coltypes.items() if v == 'time64']
if not dt_fields:
return res
for f in dt_fields:
# pytables is in seconds since epoch
res[f] *= 1e6
fields = []
for name, dtype in sort_dtype_items(t.coldtypes.items(), t.colnames):
typ = getattr(t.cols, name).type
fields.append((name, {'time64': 'datetime64[us]',
'time32': 'datetime64[D]',
'string': dtype.str}.get(typ, typ)))
return res.astype(np.dtype(fields))
def numpy_fixlen_strings(x):
""" Returns new array with strings as fixed length
>>> from numpy import rec
>>> x = rec.array([(1, 'Alice', 100), (2, 'Bob', 200)],
... dtype=[('id', 'i8'), ('name', 'O'), ('amount', 'i8')])
>>> numpy_fixlen_strings(x) # doctest: +SKIP
rec.array([(1, 'Alice', 100), (2, 'Bob', 200)],
dtype=[('id', '<i8'), ('name', 'S5'), ('amount', '<i8')])
"""
if "'O'" in str(x.dtype):
dt = [(n, "S%d" % max(map(len, x[n]))
if x.dtype[n] == 'O' else x.dtype[n])
for n in x.dtype.names]
x = x.astype(dt)
return x
def typehint(x, typedict):
"""Replace the dtypes in `x` keyed by `typedict` with the dtypes in
`typedict`.
"""
dtype = x.dtype
lhs = dict(zip(dtype.fields.keys(), map(first, dtype.fields.values())))
dtype_list = list(merge(lhs, typedict).items())
return x.astype(np.dtype(sort_dtype_items(dtype_list, dtype.names)))
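# Illustrative sketch (not part of the original module): typehint overrides
# the dtype of selected fields, assuming sort_dtype_items preserves the
# original field order.
def _example_typehint():
    x = np.zeros(3, dtype=[('a', 'i8'), ('b', 'f8')])
    y = typehint(x, {'b': np.dtype('f4')})
    assert y.dtype == np.dtype([('a', 'i8'), ('b', 'f4')])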
@dispatch(tb.Table, np.ndarray)
def into(t, x, **kwargs):
dt_types = dict((k, 'datetime64[us]') for k, (v, _) in
x.dtype.fields.items() if issubclass(v.type, np.datetime64))
x = numpy_ensure_bytes(numpy_fixlen_strings(x))
x = typehint(typehint(x, dt_types), dict.fromkeys(dt_types, 'f8'))
for name in dt_types:
x[name] /= 1e6
t.append(x)
return t
@dispatch(tb.Table, ChunkIterator)
def into(t, c, **kwargs):
for chunk in c:
into(t, chunk, **kwargs)
return t
@dispatch(tb.node.MetaNode, tb.Table)
def into(table, data, filename=None, datapath=None, **kwargs):
dshape = datashape.dshape(kwargs.setdefault('dshape', discover(data)))
t = PyTables(filename, datapath=datapath, dshape=dshape)
return into(t, data)
@dispatch(ctable, tb.Table)
def into(bc, data, **kwargs):
cs = chunks(data)
bc = into(bc, next(cs))
for chunk in cs:
bc.append(chunk)
return bc
@dispatch(tb.node.MetaNode, np.ndarray)
def into(_, x, filename=None, datapath=None, **kwargs):
# tb.node.MetaNode == type(tb.Table)
x = numpy_ensure_bytes(numpy_fixlen_strings(x))
t = PyTables(filename, datapath=datapath, dshape=discover(x))
return into(t, x, **kwargs)
@dispatch(tb.node.MetaNode, (ctable, list))
def into(_, data, filename=None, datapath=None, **kwargs):
t = PyTables(filename, datapath=datapath,
dshape=kwargs.get('dshape', discover(data)))
for chunk in map(partial(into, np.ndarray), chunks(data)):
into(t, chunk)
return t
@dispatch(tb.Table, (pd.DataFrame, CSV, SQL, nd.array, Collection))
def into(a, b, **kwargs):
return into(a, into(np.ndarray, b), **kwargs)
@dispatch(tb.Table, _strtypes)
def into(a, b, **kwargs):
kw = dict(kwargs)
if 'output_path' in kw:
del kw['output_path']
r = resource(b, **kw)
return into(a, r, **kwargs)
@dispatch(list, pd.DataFrame)
def into(_, df, **kwargs):
return into([], into(np.ndarray(0), df))
@dispatch(pd.DataFrame, nd.array)
def into(a, b, **kwargs):
ds = dshape(nd.dshape_of(b))
if list(a.columns):
names = list(a.columns)
elif isinstance(ds[-1], Record):
names = ds[-1].names
else:
names = None
if names:
return pd.DataFrame(nd.as_py(b), columns=names)
else:
return pd.DataFrame(nd.as_py(b))
@dispatch(pd.DataFrame, (list, tuple, Iterator, type(dict().items())))
def into(df, seq, **kwargs):
if list(df.columns):
return pd.DataFrame(list(seq), columns=df.columns, **kwargs)
else:
return pd.DataFrame(list(seq), **kwargs)
@dispatch(pd.DataFrame, pd.DataFrame)
def into(_, df, **kwargs):
return df.copy()
@dispatch(pd.Series, pd.Series)
def into(_, ser, **kwargs):
return ser
@dispatch(pd.Series, Iterator)
def into(a, b, **kwargs):
return into(a, list(b), **kwargs)
@dispatch(pd.Series, (list, tuple))
def into(a, b, **kwargs):
return pd.Series(b, **kwargs)
@dispatch(pd.Series, Expr)
def into(ser, col, **kwargs):
ser = into(ser, compute(col))
ser.name = col._name
return ser
@dispatch(pd.Series, pd.DataFrame)
def into(a, b, **kwargs):
if len(b.columns) != 1:
raise TypeError('Cannot transform a multiple column expression to a'
' Series')
s = b.squeeze()
if a.name is not None:
s.name = a.name
return s
@dispatch(pd.Series, Projection)
def into(ser, col, **kwargs):
return into(pd.Series, into(pd.DataFrame, col))
@dispatch(pd.Series, np.ndarray)
def into(s, x, **kwargs):
return pd.Series(numpy_ensure_strings(x), name=s.name)
@dispatch(pd.DataFrame, pd.Series)
def into(_, df, **kwargs):
return pd.DataFrame(df)
@dispatch(list, pd.Series)
def into(_, ser, **kwargs):
return ser.tolist()
@dispatch(nd.array, pd.DataFrame)
def into(a, df, **kwargs):
schema = discover(df)
arr = nd.empty(str(schema))
for i in range(len(df.columns)):
arr[:, i] = np.asarray(df[df.columns[i]])
return arr
@dispatch(np.ndarray, pd.DataFrame)
def into(a, df, **kwargs):
return df.to_records(index=False)
@dispatch(nd.array)
def discover(arr):
return dshape(nd.dshape_of(arr))
@dispatch(pd.DataFrame)
def discover(df):
obj = datashape.coretypes.object_
names = list(df.columns)
dtypes = list(map(datashape.CType.from_numpy_dtype, df.dtypes))
dtypes = [datashape.string if dt == obj else dt for dt in dtypes]
schema = Record(list(zip(names, dtypes)))
return len(df) * schema
@dispatch(pd.Series)
def discover(s):
return discover(s.to_frame())
@dispatch(np.ndarray, carray)
def into(a, b, **kwargs):
return b[:]
@dispatch(pd.Series, carray)
def into(a, b, **kwargs):
return into(a, into(np.ndarray, b))
@dispatch(ColumnDataSource, (pd.DataFrame, np.ndarray, ctable))
def into(cds, t, **kwargs):
columns = discover(t).subshape[0][0].names
return ColumnDataSource(data=dict((col, into([], t[col]))
for col in columns))
@dispatch(ColumnDataSource, Expr)
def into(cds, t, **kwargs):
columns = t.fields
return ColumnDataSource(data=dict((col, into([], t[col]))
for col in columns))
@dispatch(ColumnDataSource, tb.Table)
def into(cds, t, **kwargs):
return into(cds, into(pd.DataFrame, t))
@dispatch(ColumnDataSource, nd.array)
def into(cds, t, **kwargs):
columns = discover(t).subshape[0][0].names
return ColumnDataSource(data=dict((col, into([], getattr(t, col)))
for col in columns))
@dispatch(ColumnDataSource, Collection)
def into(cds, other, **kwargs):
return into(cds, into(pd.DataFrame, other))
@dispatch(ctable, Expr)
def into(a, b, **kwargs):
c = compute(b)
if isinstance(c, (list, tuple, Iterator)):
kwargs['types'] = [datashape.to_numpy_dtype(t) for t in
b.schema[0].types]
kwargs['names'] = b.fields
return into(a, c, **kwargs)
@dispatch(pd.DataFrame, ColumnDataSource)
def into(df, cds, **kwargs):
return cds.to_df()
def fix_len_string_filter(ser):
""" Convert object strings to fixed length, pass through others """
if ser.dtype == np.dtype('O'):
return np.asarray(list(ser))
else:
return np.asarray(ser)
@dispatch(ctable, nd.array)
def into(a, b, **kwargs):
names = dshape(nd.dshape_of(b))[1].names
columns = [getattr(b, name) for name in names]
columns = [np.asarray(nd.as_py(c))
if to_numpy_dtype(dshape(nd.dshape_of(c))) == np.dtype('O')
else into(np.ndarray(0), c) for c in columns]
return bcolz.ctable(columns, names=names, **kwargs)
@dispatch(ctable, pd.DataFrame)
def into(a, df, **kwargs):
kwargs = toolz.keyfilter(keywords(ctable).__contains__, kwargs)
return ctable([fix_len_string_filter(df[c]) for c in df.columns],
names=list(df.columns), **kwargs)
@dispatch(pd.DataFrame, ctable)
def into(a, b, **kwargs):
return b.todataframe()
@dispatch(nd.array, ctable)
def into(a, b, **kwargs):
return into(a, b[:], **kwargs)
@dispatch(ctable, ctable)
def into(a, b, **kwargs):
if not kwargs and a == ctable:
return b
else:
raise NotImplementedError()
@dispatch(Collection, DataDescriptor)
def into(coll, dd, chunksize=1024, **kwargs):
return into(coll, iter(dd), chunksize=chunksize, schema=dd.schema)
@dispatch(Collection, (tuple, list, Iterator))
def into(coll, seq, columns=None, schema=None, chunksize=1024, **kwargs):
seq = iter(seq)
item = next(seq)
seq = concat([[item], seq])
if isinstance(item, (tuple, list)):
if not columns and schema:
columns = dshape(schema)[0].names
if not columns:
raise ValueError("Inputs must be dictionaries. "
"Or provide columns=[...] or schema=DataShape(...) keyword")
seq = (dict(zip(columns, item)) for item in seq)
    for block in partition_all(chunksize, seq):
coll.insert(copy.deepcopy(block))
return coll
def numpy_ensure_strings(x):
""" Return a new array with strings that will be turned into the str type
In Python 3 the 'S' numpy type results in ``bytes`` objects. This coerces the
numpy type to a form that will create ``str`` objects
Examples
--------
>>> x = np.array(['a', 'b'], dtype='S1')
>>> # Python 2
>>> numpy_ensure_strings(x) # doctest: +SKIP
np.array(['a', 'b'], dtype='S1')
>>> # Python 3
>>> numpy_ensure_strings(x) # doctest: +SKIP
np.array(['a', 'b'], dtype='U1')
"""
if sys.version_info[0] >= 3 and 'S' in str(x.dtype):
if x.dtype.names:
dt = [(n, x.dtype[n].str.replace('S', 'U')) for n in x.dtype.names]
else:
dt = x.dtype.str.replace('S', 'U')
x = x.astype(dt)
return x
def numpy_ensure_bytes(x):
"""Return a numpy array whose string fields are converted to the bytes type
appropriate for the Python version.
Parameters
----------
x : np.ndarray
Record array
Returns
-------
x : np.ndarray
Record array with any unicode string type as a bytes type
Examples
--------
>>> x = np.array(['a', 'b'])
>>> # Python 2
>>> numpy_ensure_bytes(x) # doctest: +SKIP
np.array(['a', 'b'], dtype='|S1')
>>> # Python 3
    >>> numpy_ensure_bytes(x)  # doctest: +SKIP
np.array([b'a', b'b'], dtype='|S1')
"""
if 'U' in str(x.dtype):
if x.dtype.names is not None:
dt = [(n, x.dtype[n].str.replace('U', 'S')) for n in x.dtype.names]
else:
dt = x.dtype.str.replace('U', 'S')
x = x.astype(dt)
return x
@dispatch(Collection, (nd.array, np.ndarray))
def into(coll, x, **kwargs):
return into(coll, into(pd.DataFrame(), x), **kwargs)
@dispatch(Collection, ctable)
def into(coll, x, **kwargs):
from blaze.bcolz import chunks
for chunk in chunks(x):
into(coll, chunk)
@dispatch(Collection, Collection)
def into(a, b, **kwargs):
""" Copy collection on server-side
https://groups.google.com/forum/#!topic/mongodb-user/wHqJFp44baY
"""
b.database.command('eval', 'db.%s.copyTo("%s")' % (b.name, a.name),
nolock=True)
return b
@dispatch(Collection, pd.DataFrame)
def into(coll, df, **kwargs):
return into(coll, into([], df), columns=list(df.columns), **kwargs)
@dispatch(Collection, Expr)
def into(coll, t, **kwargs):
from blaze import compute
result = compute(t)
return into(coll, result, schema=t.schema, **kwargs)
@dispatch(pd.DataFrame, Collection)
def into(df, coll, **kwargs):
seq = list(coll.find())
for item in seq:
del item['_id']
return pd.DataFrame(seq, **kwargs)
@dispatch((nd.array, np.ndarray), Collection)
def into(x, coll, **kwargs):
return into(x, into(pd.DataFrame(), coll), **kwargs)
def _into_iter_mongodb(l, coll, columns=None, schema=None):
""" Into helper function
Return both a lazy sequence of tuples and a list of column names
"""
seq = coll.find()
if not columns and schema:
columns = schema[0].names
elif not columns:
item = next(seq)
seq = concat([[item], seq])
columns = sorted(item.keys())
columns.remove('_id')
return columns, pluck(columns, seq)
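# Illustrative sketch (not part of the original module): using
# _into_iter_mongodb to pull rows out of a (hypothetical) pymongo collection
# in a fixed column order.
def _example_into_iter_mongodb(coll):
    columns, rows = _into_iter_mongodb([], coll, columns=['name', 'amount'])
    return columns, list(rows)  # rows is a lazy sequence of tuples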
@dispatch((carray, ctable), Collection)
def into(x, coll, columns=None, schema=None, **kwargs):
columns, seq = _into_iter_mongodb(x, coll, columns=None, schema=None)
return into(x, seq, names=columns, **kwargs)
@dispatch(Iterator, Collection)
def into(l, coll, columns=None, schema=None):
columns, seq = _into_iter_mongodb(l, coll, columns=columns, schema=schema)
return seq
@dispatch((tuple, list), Collection)
def into(l, coll, columns=None, schema=None):
r = into(Iterator, coll, columns=columns, schema=schema)
return type(l)(r)
@dispatch(Collection, CSV)
def into(coll, d, if_exists="replace", **kwargs):
"""
Convert from TSV/CSV into MongoDB Collection
Parameters
----------
if_exists : string
{replace, append, fail}
header: bool (TSV/CSV only)
Flag to define if file contains a header
columns: list (TSV/CSV only)
list of column names
ignore_blank: bool
Ignores empty fields in csv and tsv exports. Default: creates fields without values
"""
import subprocess
from dateutil import parser
csv_dd = d
db = coll.database
copy_info = {
'dbname': db.name,
'coll': coll.name,
'abspath': d._abspath
}
optional_flags = []
if if_exists == 'replace':
optional_flags.append('--drop')
if kwargs.get('header', csv_dd.header):
optional_flags.append('--headerline')
if kwargs.get('ignore_blank', None):
optional_flags.append('--ignoreBlanks')
cols = kwargs.get('columns', csv_dd.columns)
copy_info['column_names'] = ','.join(cols)
delim = csv_dd.dialect['delimiter']
typ = copy_info['file_type'] = {',': 'csv', '\t': 'tsv'}.get(delim, None)
if typ is None:
dd_into_coll = into.dispatch(Collection, DataDescriptor)
return dd_into_coll(coll, csv_dd)
copy_cmd = ("mongoimport -d {dbname} -c {coll} --type {file_type} "
"--file {abspath} --fields {column_names} ")
copy_cmd = copy_cmd.format(**copy_info) + ' '.join(optional_flags)
ps = subprocess.Popen(copy_cmd, shell=os.name != 'nt',
stdout=subprocess.PIPE)
ps.wait()
# need to check for date columns and update
date_cols = []
dshape = csv_dd.dshape
for t, c in zip(dshape[1].types, dshape[1].names):
if isinstance(t, Option):
t = t.ty
if isinstance(t, (datashape.Date, datashape.DateTime)):
date_cols.append((c, t))
for d_col, ty in date_cols:
mongo_data = list(coll.find({}, {d_col: 1}))
for doc in mongo_data:
try:
t = parser.parse(doc[d_col])
except AttributeError:
t = doc[d_col]
m_id = doc['_id']
coll.update({'_id': m_id}, {"$set": {d_col: t}})
return coll
@dispatch(Collection, (JSON, JSON_Streaming))
def into(coll, d, if_exists="replace", **kwargs):
"""
into function which converts TSV/CSV/JSON into a MongoDB Collection
Parameters
----------
if_exists : string
{replace, append, fail}
json_array : bool
Accepts the import of data expressed with multiple MongoDB documents within a single JSON array.
"""
import subprocess
json_dd = d
db = coll.database
copy_info = {
'dbname': db.name,
'coll': coll.name,
'abspath': d._abspath
}
optional_flags = []
if if_exists == 'replace':
optional_flags.append('--drop')
if kwargs.get('json_array', None):
optional_flags.append('--jsonArray')
copy_info['file_type'] = 'json'
copy_cmd = ("mongoimport -d {dbname} -c {coll} --type {file_type} "
"--file {abspath} ")
copy_cmd = copy_cmd.format(**copy_info) + ' '.join(optional_flags)
ps = subprocess.Popen(copy_cmd, shell=os.name != 'nt',
stdout=subprocess.PIPE)
ps.wait()
@dispatch(nd.array, DataDescriptor)
def into(_, dd, **kwargs):
return dd.dynd[:]
@dispatch(Iterator, DataDescriptor)
def into(_, dd, **kwargs):
return iter(dd)
@dispatch((np.ndarray, ColumnDataSource, ctable), DataDescriptor)
def into(a, b, **kwargs):
return into(a, into(nd.array(), b), **kwargs)
@dispatch((np.ndarray, ColumnDataSource, ctable, tb.Table, list, tuple, set),
(CSV, Excel))
def into(a, b, **kwargs):
return into(a, into(pd.DataFrame(), b, **kwargs), **kwargs)
@dispatch(ColumnDataSource, pd.Series)
def into(a, b, **kwargs):
return ColumnDataSource(data={b.name: b.tolist()})
@dispatch((list, tuple, set), ColumnDataSource)
def into(a, cds, **kwargs):
if not isinstance(a, type):
a = type(a)
return a(zip(*cds.data.values()))
@dispatch(pd.DataFrame, CSV)
def into(a, b, **kwargs):
# Pass only keyword arguments appropriate for read_csv
kws = keywords(pd.read_csv)
options = toolz.merge(b.dialect, kwargs)
options = toolz.keyfilter(kws.__contains__, options)
return b.pandas_read_csv(chunksize=None, **options)
@dispatch((np.ndarray, pd.DataFrame, ColumnDataSource, ctable, tb.Table, list,
tuple, set), (Projection, Field))
def into(a, b, **kwargs):
""" Special case on anything <- Data(CSV)[columns]
Many CSV injest functions have keyword arguments to take only certain
columns. We should leverage these if our input is of the form like the
following for CSVs
>>> csv = CSV('/path/to/file.csv') # doctest: +SKIP
>>> t = Data(csv) # doctest: +SKIP
>>> into(list, t[['column-1', 'column-2']]) # doctest: +SKIP
"""
if isinstance(b._child, Symbol) and isinstance(b._child.data, CSV):
kwargs.setdefault('names', b._child.fields)
kwargs.setdefault('usecols', b.fields)
kwargs.setdefault('squeeze', isscalar(b.dshape.measure))
return into(a, b._child.data, **kwargs)
else:
        # TODO: replace with raise MDNotImplementedError once
# https://github.com/mrocklin/multipledispatch/pull/39 is merged
a = a if isinstance(a, type) else type(a)
f = into.dispatch(a, Expr)
return f(a, b, **kwargs)
# TODO: add signature for SQL import
# TODO: CSV of Field
@dispatch(pd.DataFrame, DataDescriptor)
def into(a, b):
return pd.DataFrame(list(b), columns=b.columns)
@dispatch(pd.DataFrame, Concat)
def into(a, b, **kwargs):
"""Convert a sequence of DataDescriptors to a DataFrame by converting each
to a DataFrame and then calling pandas.concat on the resulting sequence.
"""
return pd.concat((into(pd.DataFrame, d) for d in b.descriptors),
ignore_index=kwargs.pop('ignore_index', True),
**kwargs)
@dispatch(object, Expr)
def into(a, b):
return compute(b)
@dispatch(_strtypes, _strtypes)
def into(a, b, **kwargs):
""" Transfer data between two URIs
Transfer data between two data resources based on their URIs.
>>> into('sqlite://:memory:::tablename', '/path/to/file.csv') #doctest:+SKIP
<blaze.data.sql.SQL at 0x7f32d80b80d0>
    Uses the ``resource`` function to resolve data resources
See Also
--------
blaze.resource.resource
"""
b = resource(b, **kwargs)
return into(a, b, **kwargs)
@dispatch((type, RDD, set, np.ndarray, object), _strtypes)
def into(a, b, **kwargs):
return into(a, resource(b, **kwargs), **kwargs)
@dispatch(_strtypes, (Expr, RDD, object))
def into(a, b, **kwargs):
dshape = kwargs.pop('dshape', None)
dshape = dshape or discover(b)
if isinstance(dshape, str):
dshape = datashape.dshape(dshape)
target = resource(a, dshape=dshape,
schema=dshape.subshape[0],
mode='a',
**kwargs)
return into(target, b, dshape=dshape, **kwargs)
@dispatch(Iterator, (list, tuple, set, Iterator))
def into(a, b):
return b
@dispatch(pd.DataFrame, Excel)
def into(df, xl):
return pd.read_excel(xl.path, sheetname=xl.worksheet)
@dispatch(pd.DataFrame, ChunkIterator)
def into(df, chunks, **kwargs):
dfs = [into(df, chunk, **kwargs) for chunk in chunks]
return pd.concat(dfs, ignore_index=True)
@dispatch(np.ndarray, ChunkIterator)
def into(x, chunks, **kwargs):
arrs = [into(x, chunk, **kwargs) for chunk in chunks]
return np.vstack(arrs)
@dispatch((DataDescriptor, Collection), ChunkIterator)
def into(coll, chunks, **kwargs):
for chunk in chunks:
into(coll, chunk, **kwargs)
return coll
@dispatch((list, tuple, set), DataDescriptor)
def into(a, b, **kwargs):
if not isinstance(a, type):
a = type(a)
return a(b)
@dispatch(DataDescriptor, (list, tuple, set, DataDescriptor, Iterator))
def into(a, b, **kwargs):
a.extend(b)
return a
@dispatch(DataDescriptor, (np.ndarray, nd.array, pd.DataFrame, Collection))
def into(a, b, **kwargs):
a.extend(into(list,b))
return a
@dispatch(Number, Number)
def into(a, b, **kwargs):
if not isinstance(a, type):
a = type(a)
return a(b)
@dispatch(object)
def into(a, **kwargs):
""" Curried into function
>>> f = into(list)
>>> f((1, 2, 3))
[1, 2, 3]
"""
def partial_into(b, **kwargs2):
return into(a, b, **merge(kwargs, kwargs2))
return partial_into
# This is only here due to a conflict
# Which is only because issubclass(carray, Iterable)
@dispatch(Collection, carray)
def into(a, b, **kwargs):
into(a, into(Iterator, b, **kwargs))
return a
| {
"repo_name": "vitan/blaze",
"path": "blaze/api/into.py",
"copies": "1",
"size": "28712",
"license": "bsd-3-clause",
"hash": -1763951471820396500,
"line_mean": 26.6342637151,
"line_max": 104,
"alpha_frac": 0.6053566453,
"autogenerated": false,
"ratio": 3.3097406340057636,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4415097279305763,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import os
from functools import partial
from qtpy.QtCore import Qt
from qtpy import QtGui, QtWidgets
from glue.core import message as msg
from glue.viewers.histogram.client import HistogramClient
from glue.viewers.common.qt.mpl_toolbar import MatplotlibViewerToolbar
from glue.utils.qt import load_ui
from glue.utils.qt.widget_properties import (connect_int_spin, ButtonProperty,
FloatLineProperty, connect_float_edit,
ValueProperty, connect_bool_button)
from glue.viewers.common.qt.data_viewer import DataViewer
from glue.viewers.common.qt.mpl_widget import MplWidget, defer_draw
from glue.viewers.histogram.qt.layer_style_widget import HistogramLayerStyleWidget
from glue.viewers.histogram.layer_artist import HistogramLayerArtist
__all__ = ['HistogramWidget']
WARN_SLOW = 10000000
def _hash(x):
return str(id(x))
class HistogramWidget(DataViewer):
LABEL = "Histogram"
_property_set = DataViewer._property_set + \
'component xlog ylog normed cumulative autoscale xmin xmax nbins'.split(
)
xmin = FloatLineProperty('ui.xmin', 'Minimum value')
xmax = FloatLineProperty('ui.xmax', 'Maximum value')
normed = ButtonProperty('ui.normalized_box', 'Normalized?')
autoscale = ButtonProperty('ui.autoscale_box',
'Autoscale view to histogram?')
cumulative = ButtonProperty('ui.cumulative_box', 'Cumulative?')
nbins = ValueProperty('ui.binSpinBox', 'Number of bins')
xlog = ButtonProperty('ui.xlog_box', 'Log-scale the x axis?')
ylog = ButtonProperty('ui.ylog_box', 'Log-scale the y axis?')
_layer_style_widget_cls = {HistogramLayerArtist: HistogramLayerStyleWidget}
_toolbar_cls = MatplotlibViewerToolbar
tools = ['select:xrange']
def __init__(self, session, parent=None):
super(HistogramWidget, self).__init__(session, parent)
self.central_widget = MplWidget()
self.setCentralWidget(self.central_widget)
self.option_widget = QtWidgets.QWidget()
self.ui = load_ui('options_widget.ui', self.option_widget,
directory=os.path.dirname(__file__))
self._tweak_geometry()
self.client = HistogramClient(self._data,
self.central_widget.canvas.fig,
layer_artist_container=self._layer_artist_container)
self._init_limits()
self._connect()
# maps _hash(componentID) -> componentID
self._component_hashes = {}
def _init_limits(self):
validator = QtGui.QDoubleValidator(None)
validator.setDecimals(7)
self.ui.xmin.setValidator(validator)
self.ui.xmax.setValidator(validator)
lo, hi = self.client.xlimits
self.ui.xmin.setText(str(lo))
self.ui.xmax.setText(str(hi))
def _tweak_geometry(self):
self.central_widget.resize(600, 400)
self.resize(self.central_widget.size())
def _connect(self):
ui = self.ui
cl = self.client
ui.attributeCombo.currentIndexChanged.connect(self._set_attribute_from_combo)
ui.normalized_box.toggled.connect(partial(setattr, cl, 'normed'))
ui.autoscale_box.toggled.connect(partial(setattr, cl, 'autoscale'))
ui.cumulative_box.toggled.connect(partial(setattr, cl, 'cumulative'))
connect_int_spin(cl, 'nbins', ui.binSpinBox)
connect_float_edit(cl, 'xmin', ui.xmin)
connect_float_edit(cl, 'xmax', ui.xmax)
connect_bool_button(cl, 'xlog', ui.xlog_box)
connect_bool_button(cl, 'ylog', ui.ylog_box)
@defer_draw
def _update_attributes(self):
"""Repopulate the combo box that selects the quantity to plot"""
combo = self.ui.attributeCombo
component = self.component
new = self.client.component or component
combo.blockSignals(True)
combo.clear()
# implementation note:
# PySide doesn't robustly store python objects with setData
# use _hash(x) instead
model = QtGui.QStandardItemModel()
data_ids = set(_hash(d) for d in self._data)
self._component_hashes = dict((_hash(c), c) for d in self._data
for c in d.components)
found = False
for d in self._data:
if d not in self._layer_artist_container:
continue
item = QtGui.QStandardItem(d.label)
item.setData(_hash(d), role=Qt.UserRole)
assert item.data(Qt.UserRole) == _hash(d)
item.setFlags(item.flags() & ~Qt.ItemIsEnabled)
model.appendRow(item)
for c in d.visible_components:
if (not d.get_component(c).categorical and
not d.get_component(c).numeric):
continue
if c is new:
found = True
item = QtGui.QStandardItem(c.label)
item.setData(_hash(c), role=Qt.UserRole)
model.appendRow(item)
combo.setModel(model)
# separators below data items
for i in range(combo.count()):
if combo.itemData(i) in data_ids:
combo.insertSeparator(i + 1)
combo.blockSignals(False)
if found:
self.component = new
else:
combo.setCurrentIndex(2) # skip first data + separator
self._set_attribute_from_combo()
@property
def component(self):
combo = self.ui.attributeCombo
index = combo.currentIndex()
return self._component_hashes.get(combo.itemData(index), None)
@component.setter
def component(self, component):
combo = self.ui.attributeCombo
if combo.count() == 0: # cold start problem, when restoring
self._update_attributes()
# combo.findData doesn't seem to work robustly
for i in range(combo.count()):
data = combo.itemData(i)
if data == _hash(component):
combo.setCurrentIndex(i)
return
raise IndexError("Component not present: %s" % component)
@defer_draw
def _set_attribute_from_combo(self, *args):
if self.component is not None:
for d in self._data:
try:
component = d.get_component(self.component)
except:
continue
else:
break
if component.categorical:
if self.ui.xlog_box.isEnabled():
self.ui.xlog_box.setEnabled(False)
self.xlog = False
else:
if not self.ui.xlog_box.isEnabled():
self.ui.xlog_box.setEnabled(True)
self.client.set_component(self.component)
self.update_window_title()
@defer_draw
def add_data(self, data):
""" Add data item to combo box.
If first addition, also update attributes """
if self.data_present(data):
return True
if data.size > WARN_SLOW and not self._confirm_large_data(data):
return False
self.client.add_layer(data)
self._update_attributes()
return True
def add_subset(self, subset):
pass
def _remove_data(self, data):
""" Remove data item from the combo box """
pass
def data_present(self, data):
return data in self._layer_artist_container
def register_to_hub(self, hub):
super(HistogramWidget, self).register_to_hub(hub)
self.client.register_to_hub(hub)
hub.subscribe(self,
msg.DataCollectionDeleteMessage,
handler=lambda x: self._remove_data(x.data))
hub.subscribe(self,
msg.DataUpdateMessage,
handler=lambda *args: self._update_labels())
hub.subscribe(self,
msg.ComponentsChangedMessage,
handler=lambda x: self._update_attributes())
def unregister(self, hub):
super(HistogramWidget, self).unregister(hub)
self.client.unregister(hub)
hub.unsubscribe_all(self)
@property
def window_title(self):
c = self.client.component
if c is not None:
label = str(c.label)
else:
label = 'Histogram'
return label
def _update_labels(self):
self.update_window_title()
self._update_attributes()
def __str__(self):
return "Histogram Widget"
def options_widget(self):
return self.option_widget
| {
"repo_name": "saimn/glue",
"path": "glue/viewers/histogram/qt/viewer_widget.py",
"copies": "1",
"size": "8779",
"license": "bsd-3-clause",
"hash": -3931380991496283600,
"line_mean": 33.6996047431,
"line_max": 90,
"alpha_frac": 0.5955120173,
"autogenerated": false,
"ratio": 4.0889613414066135,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5184473358706614,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import os
from inspect import getargspec
from qtpy import QtWidgets
from qtpy import PYSIDE
from glue import core
from glue.config import link_function, link_helper
from glue.utils import nonpartial
from glue.utils.qt import load_ui, messagebox_on_error, update_combobox
from glue.utils.qt.widget_properties import CurrentComboTextProperty, CurrentComboDataProperty
__all__ = ['LinkEquation']
def get_function_name(item):
if hasattr(item, 'display') and item.display is not None:
return item.display
else:
return item.__name__
def function_label(function):
""" Provide a label for a function
:param function: A member from the glue.config.link_function registry
"""
args = getargspec(function.function)[0]
args = ', '.join(args)
output = function.output_labels
output = ', '.join(output)
label = "Link from %s to %s" % (args, output)
return label
def helper_label(helper):
""" Provide a label for a link helper
:param helper: A member from the glue.config.link_helper registry
"""
return helper.info
class ArgumentWidget(QtWidgets.QWidget):
def __init__(self, argument, parent=None):
super(ArgumentWidget, self).__init__(parent)
self.layout = QtWidgets.QHBoxLayout()
self.layout.setContentsMargins(1, 0, 1, 1)
self.setLayout(self.layout)
label = QtWidgets.QLabel(argument)
self._label = label
self._component_id = None
self.layout.addWidget(label)
self.editor = QtWidgets.QLineEdit()
self.editor.setReadOnly(True)
try:
self.editor.setPlaceholderText("Drag a component from above")
except AttributeError: # feature added in Qt 4.7
pass
self.layout.addWidget(self.editor)
self.setAcceptDrops(True)
@property
def component_id(self):
return self._component_id
@component_id.setter
def component_id(self, cid):
self._component_id = cid
self.editor.setText(str(cid))
@property
def label(self):
return self._label.text()
@label.setter
def label(self, label):
self._label.setText(label)
@property
def editor_text(self):
return self.editor.text()
def clear(self):
self.component_id = None
self.editor.clear()
def dragEnterEvent(self, event):
if event.mimeData().hasFormat('application/py_instance'):
event.accept()
else:
event.ignore()
def dropEvent(self, event):
obj = event.mimeData().data('application/py_instance')
if isinstance(obj, list):
obj = obj[0]
if not isinstance(obj, core.data.ComponentID):
event.ignore()
return
self.component_id = obj
event.accept()
class LinkEquation(QtWidgets.QWidget):
""" Interactively define ComponentLinks from existing functions
This widget inspects the calling signatures of helper functions,
and presents the user with an interface for assigning componentIDs
to the input and output arguments. It also generates ComponentLinks
from this information.
ComponentIDs are assigned to arguments via drag and drop. This
widget is used within the LinkEditor dialog
Usage::
widget = LinkEquation()
"""
category = CurrentComboTextProperty('_ui.category')
function = CurrentComboDataProperty('_ui.function')
def __init__(self, parent=None):
super(LinkEquation, self).__init__(parent)
# Set up mapping of function/helper name -> function/helper tuple. For the helpers, we use the 'display' name if available.
self._argument_widgets = []
self.spacer = None
self._output_widget = ArgumentWidget("")
# pyqt4 can't take self as second argument here
# for some reason. Manually embed
self._ui = load_ui('link_equation.ui', None,
directory=os.path.dirname(__file__))
l = QtWidgets.QHBoxLayout()
l.addWidget(self._ui)
self.setLayout(l)
self._init_widgets()
self._populate_category_combo()
self.category = 'General'
self._populate_function_combo()
self._connect()
self._setup_editor()
def set_result_visible(self, state):
self._ui.output_canvas.setVisible(state)
self._ui.output_label.setVisible(state)
def is_helper(self):
return self.function is not None and \
type(self.function).__name__ == 'LinkHelper'
def is_function(self):
return self.function is not None and \
type(self.function).__name__ == 'LinkFunction'
def _init_widgets(self):
layout = QtWidgets.QVBoxLayout()
layout.setSpacing(1)
self._ui.input_canvas.setLayout(layout)
layout = QtWidgets.QVBoxLayout()
layout.setContentsMargins(1, 0, 1, 1)
self._ui.output_canvas.setLayout(layout)
layout.addWidget(self._output_widget)
spacer = QtWidgets.QSpacerItem(5, 5, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
layout.addItem(spacer)
@property
def add_button(self):
return self._ui.addButton
@property
def signature(self):
""" Returns the ComponentIDs assigned to the input and output arguments
:rtype: tuple of (input, output). Input is a list of ComponentIDs.
output is a ComponentID
"""
inp = [a.component_id for a in self._argument_widgets]
out = self._output_widget.component_id
return inp, out
@signature.setter
def signature(self, inout):
inp, out = inout
for i, a in zip(inp, self._argument_widgets):
a.component_id = i
self._output_widget.component_id = out
@messagebox_on_error("Failed to create links")
def links(self):
""" Create ComponentLinks from the state of the widget
:rtype: list of ComponentLinks that can be created.
If no links can be created (e.g. because of missing input),
the empty list is returned
"""
inp, out = self.signature
if self.is_function():
using = self.function.function
if not all(inp) or not out:
return []
link = core.component_link.ComponentLink(inp, out, using)
return [link]
if self.is_helper():
helper = self.function.helper
if not all(inp):
return []
return helper(*inp)
def _update_add_enabled(self):
state = True
for a in self._argument_widgets:
state = state and a.component_id is not None
if self.is_function():
state = state and self._output_widget.component_id is not None
self._ui.addButton.setEnabled(state)
def _connect(self):
signal = self._ui.function.currentIndexChanged
signal.connect(nonpartial(self._setup_editor))
signal.connect(nonpartial(self._update_add_enabled))
self._output_widget.editor.textChanged.connect(nonpartial(self._update_add_enabled))
self._ui.category.currentIndexChanged.connect(self._populate_function_combo)
def clear_inputs(self):
for w in self._argument_widgets:
w.clear()
self._output_widget.clear()
def _setup_editor(self):
if self.is_function():
self._setup_editor_function()
else:
self._setup_editor_helper()
def _setup_editor_function(self):
""" Prepare the widget for the active function."""
assert self.is_function()
self.set_result_visible(True)
func = self.function.function
args = getargspec(func)[0]
label = function_label(self.function)
self._ui.info.setText(label)
self._output_widget.label = self.function.output_labels[0]
self._clear_input_canvas()
for a in args:
self._add_argument_widget(a)
self.spacer = QtWidgets.QSpacerItem(5, 5, QtWidgets.QSizePolicy.Minimum,
QtWidgets.QSizePolicy.Expanding)
self._ui.input_canvas.layout().addItem(self.spacer)
def _setup_editor_helper(self):
"""Setup the editor for the selected link helper"""
assert self.is_helper()
self.set_result_visible(False)
label = helper_label(self.function)
args = self.function.input_labels
self._ui.info.setText(label)
self._clear_input_canvas()
for a in args:
self._add_argument_widget(a)
self.spacer = QtWidgets.QSpacerItem(5, 5, QtWidgets.QSizePolicy.Minimum,
QtWidgets.QSizePolicy.Expanding)
self._ui.input_canvas.layout().addItem(self.spacer)
def _add_argument_widget(self, argument):
""" Create and add a single argument widget to the input canvas
        :param argument: The argument name (string)
"""
widget = ArgumentWidget(argument)
widget.editor.textChanged.connect(nonpartial(self._update_add_enabled))
self._ui.input_canvas.layout().addWidget(widget)
self._argument_widgets.append(widget)
def _clear_input_canvas(self):
""" Remove all widgets from the input canvas """
layout = self._ui.input_canvas.layout()
for a in self._argument_widgets:
layout.removeWidget(a)
a.close()
if not PYSIDE:
# PySide crashing here
layout.removeItem(self.spacer)
self._argument_widgets = []
def _populate_category_combo(self):
f = [f for f in link_function.members if len(f.output_labels) == 1]
categories = sorted(set(l.category for l in f + link_helper.members))
update_combobox(self._ui.category, list(zip(categories, categories)))
def _populate_function_combo(self):
""" Add name of functions to function combo box """
f = [f for f in link_function.members if len(f.output_labels) == 1]
functions = ((get_function_name(l[0]), l) for l in f + link_helper.members if l.category == self.category)
update_combobox(self._ui.function, functions)
| {
"repo_name": "saimn/glue",
"path": "glue/dialogs/link_editor/qt/link_equation.py",
"copies": "1",
"size": "10331",
"license": "bsd-3-clause",
"hash": 3284742952285944000,
"line_mean": 32.651465798,
"line_max": 131,
"alpha_frac": 0.6263672442,
"autogenerated": false,
"ratio": 4.1012306470821756,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5227597891282175,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import os
from locale import getpreferredencoding
from os.path import abspath
from conda.compat import PY3
from conda.cli.conda_argparse import ArgumentParser
from conda_build.index import update_index
def main():
p = ArgumentParser(
description="Update package index metadata files in given directories.")
p.add_argument(
'dir',
help='Directory that contains an index to be updated.',
nargs='*',
default=[os.getcwd()],
)
p.add_argument(
'-c', "--check-md5",
action="store_true",
help="""Use MD5 values instead of file modification times for determining if a
package's metadata needs to be updated.""",
)
p.add_argument(
'-f', "--force",
action="store_true",
help="Force reading all files.",
)
p.add_argument(
'-q', "--quiet",
action="store_true",
help="Don't show any output.",
)
p.add_argument(
'--no-remove',
action="store_false",
dest="remove",
default=True,
help="Don't remove entries for files that don't exist.",
)
args = p.parse_args()
dir_paths = [abspath(path) for path in args.dir]
# Don't use byte strings in Python 2
if not PY3:
dir_paths = [path.decode(getpreferredencoding()) for path in dir_paths]
for path in dir_paths:
update_index(path, verbose=(not args.quiet), force=args.force,
check_md5=args.check_md5, remove=args.remove)
if __name__ == '__main__':
main()
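# Illustrative sketch (not part of the original module): the programmatic
# equivalent of running this command on a single channel directory. The path
# below is hypothetical.
def _example_update_index(channel_dir='./linux-64'):
    update_index(abspath(channel_dir), verbose=True, force=False,
                 check_md5=False, remove=True)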
| {
"repo_name": "dan-blanchard/conda-build",
"path": "conda_build/main_index.py",
"copies": "8",
"size": "1619",
"license": "bsd-3-clause",
"hash": -8339690776249541000,
"line_mean": 24.6984126984,
"line_max": 86,
"alpha_frac": 0.6040765905,
"autogenerated": false,
"ratio": 3.920096852300242,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8524173442800242,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import os
from multiprocessing import Process
import math
import argparse
import logging
import numpy as np
import torch
import torch.nn.functional as F
import torch.multiprocessing as mp
from torch.autograd import Variable
from envs import create_env
from models.ES import ES
#logger
logger = logging.getLogger("universe-server")
logger.setLevel(logging.INFO)
def ES_train(env_name):
"""Train Evolution Strategies model in separate process not to block Flask"""
p = Process(target=train_model, args=(env_name,1))
p.start()
def train_model(env_name, num_threads):
"""Train and save the model"""
# set parameters as namespace object and give them values
args = argparse.Namespace()
args.env_name = env_name
args.lr = 0.3 # learning rate
args.lr_decay = 1 #learning rate decay
args.sigma = 0.05 # noise standard deviation
args.n = 40 # batch size (even number)
args.max_episode_length = 10 # maximum length of an episode 100000
args.max_gradient_updates = 10 # 100000
args.restore = '' # restore checkpoint
args.variable_ep_len = False # Change max episode length during training
args.silent = False # Prints during training
env = create_env(args.env_name, client_id="ES1", remotes=1) # Local docker container
chkpt_dir = 'checkpoints/%s/' % args.env_name
if not os.path.exists(chkpt_dir):
os.makedirs(chkpt_dir)
synced_model = ES(env.observation_space.shape[0], env.action_space)
for param in synced_model.parameters():
param.requires_grad = False
if args.restore:
state_dict = torch.load(args.restore)
synced_model.load_state_dict(state_dict)
train_loop(args, synced_model, env, chkpt_dir)
def do_rollouts(args, models, random_seeds, return_queue, env, are_negative):
"""
For each model, do a rollout.
"""
all_returns = []
all_num_frames = []
for model in models:
cx = Variable(torch.zeros(1, 256))
hx = Variable(torch.zeros(1, 256))
state = env.reset()
state = torch.from_numpy(state)
this_model_return = 0
this_model_num_frames = 0
# Rollout
for step in range(args.max_episode_length):
logit, (hx, cx) = model(
(Variable(state.unsqueeze(0), volatile=True),
(hx, cx)))
prob = F.softmax(logit)
action = prob.max(1)[1].data.numpy()
state, reward, done, _ = env.step(action[0, 0])
            logger.info("rollout step %d reward %s", step, reward)
this_model_return += reward
this_model_num_frames += 1
if done:
break
state = torch.from_numpy(state)
all_returns.append(this_model_return)
all_num_frames.append(this_model_num_frames)
return_queue.put((random_seeds, all_returns, all_num_frames, are_negative))
def perturb_model(args, model, random_seed, env):
"""
    Modifies the given model with a perturbation of its parameters,
as well as the negative perturbation, and returns both perturbed
models.
"""
new_model = ES(env.observation_space.shape[0],
env.action_space)
anti_model = ES(env.observation_space.shape[0],
env.action_space)
new_model.load_state_dict(model.state_dict())
anti_model.load_state_dict(model.state_dict())
np.random.seed(random_seed)
for (k, v), (anti_k, anti_v) in zip(new_model.es_params(),
anti_model.es_params()):
eps = np.random.normal(0, 1, v.size())
v += torch.from_numpy(args.sigma*eps).float()
anti_v += torch.from_numpy(args.sigma*-eps).float()
return [new_model, anti_model]
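# Illustrative sketch (not part of the original module): the mirrored
# (antithetic) perturbation idea used by perturb_model, shown on a plain
# NumPy parameter vector instead of a torch model.
def _example_mirrored_noise(theta, sigma=0.05, seed=0):
    rng = np.random.RandomState(seed)
    eps = rng.normal(0, 1, theta.shape)
    # Evaluating both theta + sigma*eps and theta - sigma*eps reduces the
    # variance of the resulting gradient estimate.
    return theta + sigma * eps, theta - sigma * eps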
optimConfig = []
averageReward = []
maxReward = []
minReward = []
episodeCounter = []
def gradient_update(args, synced_model, returns, random_seeds, neg_list,
num_eps, num_frames, chkpt_dir, unperturbed_results):
def fitness_shaping(returns):
"""
A rank transformation on the rewards, which reduces the chances
of falling into local optima early in training.
"""
sorted_returns_backwards = sorted(returns)[::-1]
lamb = len(returns)
shaped_returns = []
denom = sum([max(0, math.log(lamb/2 + 1, 2) -
math.log(sorted_returns_backwards.index(r) + 1, 2))
for r in returns])
for r in returns:
num = max(0, math.log(lamb/2 + 1, 2) -
math.log(sorted_returns_backwards.index(r) + 1, 2))
shaped_returns.append(num/denom + 1/lamb)
return shaped_returns
def unperturbed_rank(returns, unperturbed_results):
nth_place = 1
for r in returns:
if r > unperturbed_results:
nth_place += 1
rank_diag = ('%d out of %d (1 means gradient '
'is uninformative)' % (nth_place,
len(returns) + 1))
return rank_diag, nth_place
batch_size = len(returns)
assert batch_size == args.n
assert len(random_seeds) == batch_size
shaped_returns = fitness_shaping(returns)
rank_diag, rank = unperturbed_rank(returns, unperturbed_results)
print('Episode num: %d\n'
'Average reward: %f\n'
'Variance in rewards: %f\n'
'Max reward: %f\n'
'Min reward: %f\n'
'Batch size: %d\n'
'Max episode length: %d\n'
'Sigma: %f\n'
'Learning rate: %f\n'
'Total num frames seen: %d\n'
'Unperturbed reward: %f\n'
'Unperturbed rank: %s\n\n' %
(num_eps, np.mean(returns), np.var(returns), max(returns),
min(returns), batch_size,
args.max_episode_length, args.sigma, args.lr, num_frames,
unperturbed_results, rank_diag))
averageReward.append(np.mean(returns))
episodeCounter.append(num_eps)
maxReward.append(max(returns))
minReward.append(min(returns))
# For each model, generate the same random numbers as we did
# before, and update parameters. We apply weight decay once.
for i in range(args.n):
np.random.seed(random_seeds[i])
multiplier = -1 if neg_list[i] else 1
reward = shaped_returns[i]
for k, v in synced_model.es_params():
eps = np.random.normal(0, 1, v.size())
v += torch.from_numpy(args.lr/(args.n*args.sigma) *
(reward*multiplier*eps)).float()
args.lr *= args.lr_decay
torch.save(synced_model.state_dict(),
os.path.join(chkpt_dir, 'latest.pth'))
return synced_model
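# Illustrative sketch (not part of the original module): the rank-based
# fitness shaping used inside gradient_update, applied to a toy list of
# returns. Higher-ranked returns receive larger shaped weights.
def _example_fitness_shaping(returns=(1.0, 5.0, 3.0)):
    ranked = sorted(returns)[::-1]
    lamb = len(returns)
    raw = [max(0, math.log(lamb / 2 + 1, 2) - math.log(ranked.index(r) + 1, 2))
           for r in returns]
    denom = sum(raw)
    return [r / denom + 1 / lamb for r in raw]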
def generate_seeds_and_models(args, synced_model, env):
"""
Returns a seed and 2 perturbed models
"""
np.random.seed()
random_seed = np.random.randint(2**30)
two_models = perturb_model(args, synced_model, random_seed, env)
return random_seed, two_models
def train_loop(args, synced_model, env, chkpt_dir):
def flatten(raw_results, index):
notflat_results = [result[index] for result in raw_results]
return [item for sublist in notflat_results for item in sublist]
logger.info("Num params in network %d" % synced_model.count_parameters())
num_eps = 0
total_num_frames = 0
for _ in range(args.max_gradient_updates):
processes = []
return_queue = mp.Queue()
all_seeds, all_models = [], []
# Generate a perturbation and its antithesis
for j in range(int(args.n/2)):
random_seed, two_models = generate_seeds_and_models(args,
synced_model,
env)
# Add twice because we get two models with the same seed
all_seeds.append(random_seed)
all_seeds.append(random_seed)
all_models += two_models
assert len(all_seeds) == len(all_models)
# Keep track of which perturbations were positive and negative
# Start with negative true because pop() makes us go backwards
is_negative = True
# Add all peturbed models to the queue
while all_models:
perturbed_model = all_models.pop()
seed = all_seeds.pop()
p = mp.Process(target=do_rollouts, args=(args,
[perturbed_model],
[seed],
return_queue,
env,
[is_negative]))
p.start()
processes.append(p)
is_negative = not is_negative
assert len(all_seeds) == 0
# Evaluate the unperturbed model as well
p = mp.Process(target=do_rollouts, args=(args, [synced_model],
['dummy_seed'],
return_queue, env,
['dummy_neg']))
p.start()
processes.append(p)
for p in processes:
p.join()
raw_results = [return_queue.get() for p in processes]
seeds, results, num_frames, neg_list = [flatten(raw_results, index)
for index in [0, 1, 2, 3]]
# Separate the unperturbed results from the perturbed results
_ = unperturbed_index = seeds.index('dummy_seed')
seeds.pop(unperturbed_index)
unperturbed_results = results.pop(unperturbed_index)
_ = num_frames.pop(unperturbed_index)
_ = neg_list.pop(unperturbed_index)
total_num_frames += sum(num_frames)
num_eps += len(results)
synced_model = gradient_update(args, synced_model, results, seeds,
neg_list, num_eps, total_num_frames,
chkpt_dir, unperturbed_results)
if args.variable_ep_len:
args.max_episode_length = int(2*sum(num_frames)/len(num_frames))
| {
"repo_name": "ibrica/universe-server",
"path": "trainings/ES_train.py",
"copies": "1",
"size": "10266",
"license": "mit",
"hash": -4872763249916882000,
"line_mean": 38.6409266409,
"line_max": 88,
"alpha_frac": 0.5676992012,
"autogenerated": false,
"ratio": 3.7508220679576176,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48185212691576174,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import os
from os import path
from .catalog_arr import load_blaze_array
def is_valid_bpath(d):
"""Returns true if it's a valid blaze path"""
# Disallow backslashes in blaze paths
if '\\' in d:
return False
# There should not be multiple path separators in a row
if '//' in d:
return False
return True
def is_abs_bpath(d):
"""Returns true if it's an absolute blaze path"""
return is_valid_bpath(d) and d.startswith('/')
def is_rel_bpath(d):
"""Returns true if it's a relative blaze path"""
return is_valid_bpath(d) and not d.startswith('/')
def _clean_bpath_components(components):
res = []
for c in components:
if c == '.':
# Remove '.'
pass
elif c == '..':
if all(x == '..' for x in res):
# Relative path starting with '..'
res.append('..')
elif res == ['']:
# Root of absolute path
raise ValueError('Cannot use ".." at root of blaze catalog')
else:
# Remove the last entry
res.pop()
else:
res.append(c)
return res
def _split_bpath(d):
if is_valid_bpath(d):
if d == '':
return []
elif d == '/':
return ['']
elif d.endswith('/'):
d = d[:-1]
return d.split('/')
else:
raise ValueError('Invalid blaze catalog path %r' % d)
def _rejoin_bpath(components):
if components == ['']:
return '/'
else:
return '/'.join(components)
def clean_bpath(d):
if is_valid_bpath(d):
components = _split_bpath(d)
components = _clean_bpath_components(components)
return _rejoin_bpath(components)
else:
raise ValueError('Invalid blaze catalog path %r' % d)
def join_bpath(d1, d2):
if is_abs_bpath(d2):
return clean_bpath(d2)
elif is_abs_bpath(d1):
components = _split_bpath(d1) + _split_bpath(d2)
components = _clean_bpath_components(components)
return _rejoin_bpath(components)
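# Illustrative sketch (not part of the original module): expected behaviour of
# the blaze-path helpers defined above.
def _example_bpath_helpers():
    assert clean_bpath('/a/./b/../c') == '/a/c'
    assert join_bpath('/data', 'sub/arr') == '/data/sub/arr'
    assert join_bpath('/data', '/other') == '/other'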
class CatalogDir(object):
"""This object represents a directory path within the blaze catalog"""
def __init__(self, conf, dir):
self.conf = conf
self.dir = dir
if not is_abs_bpath(dir):
raise ValueError('Require an absolute blaze path: %r' % dir)
self._fsdir = path.join(conf.root, dir[1:])
if not path.exists(self._fsdir) or not path.isdir(self._fsdir):
raise RuntimeError('Blaze path not found: %r' % dir)
def ls_arrs(self):
"""Return a list of all the arrays in this blaze dir"""
return self.conf.ls_arrs(self.dir)
def ls_dirs(self):
"""Return a list of all the directories in this blaze dir"""
return self.conf.ls_dirs(self.dir)
def ls(self):
"""
Returns a list of all the arrays and directories in this blaze dir
"""
return self.conf.ls(self.dir)
    def __getitem__(self, key):
if isinstance(key, tuple):
key = '/'.join(key)
if not is_rel_bpath(key):
raise ValueError('Require a relative blaze path: %r' % key)
dir = '/'.join([self.dir, key])
        # `dir` is an absolute blaze path; join with the relative key instead
        # so the filesystem location stays under the catalog root.
        fsdir = path.join(self._fsdir, key)
if path.isdir(fsdir):
return CatalogDir(self.conf, dir)
elif path.isfile(fsdir + '.array'):
return load_blaze_array(self.conf, dir)
else:
raise RuntimeError('Blaze path not found: %r' % dir)
def __repr__(self):
return ("Blaze Catalog Directory\nconfig: %s\ndir: %s"
% (self.conf.configfile, self.dir))
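# --- Hedged usage sketch (added for illustration; not part of the original
# module).  The path helpers above are pure functions, so they can be
# sanity-checked without a catalog configuration; CatalogDir itself needs a
# config object exposing `root`, the `ls*` methods and `configfile`, which is
# not constructed here.
if __name__ == '__main__':
    assert clean_bpath('/a/./b/../c') == '/a/c'
    assert join_bpath('/a/b', '../c') == '/a/c'
    assert join_bpath('/a/b', '/x') == '/x'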
| {
"repo_name": "zeeshanali/blaze",
"path": "blaze/catalog/catalog_dir.py",
"copies": "7",
"size": "3772",
"license": "bsd-3-clause",
"hash": 283608171693789540,
"line_mean": 28.2403100775,
"line_max": 76,
"alpha_frac": 0.5567338282,
"autogenerated": false,
"ratio": 3.658583899127061,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.771531772732706,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import os
from os import path
import glob
import shutil
import tempfile
from dynd import nd, ndt
from .. import array
def load_json_file_array(root, array_name):
# Load the datashape
dsfile = root + '.datashape'
if not path.isfile(dsfile):
dsfile = path.dirname(root) + '.datashape'
if not path.isfile(dsfile):
raise Exception('No datashape file found for array %s'
% array_name)
with open(dsfile) as f:
dt = ndt.type(f.read())
# Load the JSON
# TODO: Add stream support to parse_json for compressed JSON, etc.
arr = nd.parse_json(dt, nd.memmap(root + '.json'))
return array(arr)
def load_json_directory_array(root, array_name):
# Load the datashape
dsfile = root + '.datashape'
if not path.isfile(dsfile):
raise Exception('No datashape file found for array %s' % array_name)
with open(dsfile) as f:
dt = ndt.type(f.read())
# Scan for JSON files, assuming they're just #.json
# Sort them numerically
files = sorted([(int(path.splitext(path.basename(x))[0]), x)
for x in glob.glob(path.join(root, '*.json'))])
files = [x[1] for x in files]
# Make an array with an extra fixed dimension, then
# read a JSON file into each element of that array
dt = ndt.make_fixed_dim(len(files), dt)
arr = nd.empty(dt)
for i, fname in enumerate(files):
nd.parse_json(arr[i], nd.memmap(fname))
arr.flag_as_immutable()
return array(arr)
def load_json_file_list_array(root, array_name):
# Load the datashape
dsfile = root + '.datashape'
if not path.isfile(dsfile):
raise Exception('No datashape file found for array %s' % array_name)
with open(dsfile) as f:
dt = ndt.type(f.read())
# Scan for JSON files -- no assumption on file suffix
    # Open the list of files and load them into a Python list
files = root + '.files'
with open(files) as f:
l_files = [fs.strip() for fs in f]
# Make an array with an extra fixed dimension, then
# read a JSON file into each element of that array
dt = ndt.make_fixed_dim(len(l_files), dt)
arr = nd.empty(dt)
for i, fname in enumerate(l_files):
with open(fname) as f:
nd.parse_json(arr[i], f.read())
arr.flag_as_immutable()
return array(arr)
class json_array_provider:
def __init__(self, root_dir):
if not path.isdir(root_dir):
raise ValueError('%s is not a valid directory' % root_dir)
self.root_dir = root_dir
self.array_cache = {}
self.session_dirs = {}
def __call__(self, array_name):
# First check that the .json file at the requested address exists
root = path.join(self.root_dir, array_name[1:])
if (not path.isfile(root + '.json') and
not path.isfile(root + '.deferred.json') and
not path.isfile(root + '.files') and
not path.isdir(root)):
return None
# If we've already read this array into cache, just return it
print('Cache has keys %s' % self.array_cache.keys())
print('Checking cache for %s' % array_name)
if array_name in self.array_cache:
print('Returning cached array %s' % array_name)
return self.array_cache[array_name]
if path.isfile(root + '.json'):
print('Loading array %s from file %s'
% (array_name, root + '.json'))
arr = load_json_file_array(root, array_name)
elif path.isfile(root + '.deferred.json'):
print('Loading deferred array %s from file %s'
% (array_name, root + '.deferred.json'))
with open(root + '.deferred.json') as f:
print(f.read())
raise RuntimeError('TODO: Deferred loading not implemented!')
elif path.isfile(root + '.files'):
print('Loading files from file list: %s' % (root + '.files'))
arr = load_json_file_list_array(root, array_name)
else:
print('Loading array %s from directory %s' % (array_name, root))
arr = load_json_directory_array(root, array_name)
self.array_cache[array_name] = arr
return arr
def create_session_dir(self):
d = tempfile.mkdtemp(prefix='.session_', dir=self.root_dir)
session_name = '/' + os.path.basename(d)
if type(session_name) is unicode:
session_name = session_name.encode('utf-8')
self.session_dirs[session_name] = d
return session_name, d
def delete_session_dir(self, session_name):
shutil.rmtree(self.session_dirs[session_name])
del self.session_dirs[session_name]
def create_deferred_array_filename(self, session_name,
prefix, cache_array):
d = tempfile.mkstemp(suffix='.deferred.json', prefix=prefix,
dir=self.session_dirs[session_name], text=True)
array_name = os.path.basename(d[1])
array_name = session_name + '/' + array_name[:array_name.find('.')]
if type(array_name) is unicode:
array_name = array_name.encode('utf-8')
if cache_array is not None:
self.array_cache[array_name] = cache_array
return (os.fdopen(d[0], "w"), array_name, d[1])
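# --- Hedged usage sketch (added for illustration; not part of the original
# module).  A provider is built from a catalog root directory and called with
# absolute catalog names; a name resolves to None unless a matching '.json',
# '.deferred.json' or '.files' entry (or a directory) exists under the root.
# The paths below are hypothetical:
#
#     provider = json_array_provider('/srv/blaze_catalog')
#     arr = provider('/measurements')   # loads measurements.json + .datashape
#     missing = provider('/nope')       # -> None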
| {
"repo_name": "cezary12/blaze",
"path": "blaze/catalog/array_provider.py",
"copies": "13",
"size": "5440",
"license": "bsd-3-clause",
"hash": 7926381133068320000,
"line_mean": 35.7567567568,
"line_max": 76,
"alpha_frac": 0.5926470588,
"autogenerated": false,
"ratio": 3.6436704621567313,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0005381591268688043,
"num_lines": 148
} |
from __future__ import absolute_import, division, print_function
import os
from platform import platform
from itertools import product
import pytest
sa = pytest.importorskip('sqlalchemy')
from datashape import dshape, discover
from odo import resource, odo
from odo.utils import tmpfile, filetext
ds = dshape('var * {a: int32, b: int32}')
data = [(1, 2), (10, 20), (100, 200)]
@pytest.yield_fixture
def csv():
with tmpfile('csv') as filename:
yield odo(data, filename, dshape=ds, has_header=False)
def test_simple_into(csv):
tbl = 'testtable'
with tmpfile('db') as filename:
engine = resource('sqlite:///' + filename)
t = resource('sqlite:///' + filename + '::' + tbl,
dshape=ds)
odo(csv, t, dshape=ds)
conn = engine.raw_connection()
cursor = conn.cursor()
cursor.execute("""SELECT
name
FROM
sqlite_master
WHERE type='table' and name='{0}';""".format(tbl))
sqlite_tbl_names = cursor.fetchall()
conn.close()
assert sqlite_tbl_names[0][0] == tbl
assert odo(t, list) == data
def test_csv_with_header():
with tmpfile('db') as dbfilename:
with filetext('a,b\n1,2\n3,4', extension='csv') as csvfilename:
t = odo(csvfilename, 'sqlite:///%s::mytable' % dbfilename)
assert discover(t) == dshape('var * {a: int64, b: int64}')
assert odo(t, set) == set([(1, 2), (3, 4)])
def test_csv_infer_header():
with tmpfile('db') as dbfilename:
with filetext('a,b\n1,2\n3,4', extension='csv') as csvfilename:
t = odo(csvfilename, 'sqlite:///%s::mytable' % dbfilename)
assert discover(t) == dshape('var * {a: int64, b: int64}')
assert odo(t, set) == set([(1, 2), (3, 4)])
@pytest.mark.parametrize(['sep', 'header'],
product([',', '|', '\t'], [True, False]))
def test_sqlite_to_csv(sep, header):
with tmpfile('db') as dbfilename:
with filetext('a,b\n1,2\n3,4', extension='csv') as csvfilename:
t = odo(csvfilename, 'sqlite:///%s::mytable' % dbfilename)
with tmpfile('.csv') as fn:
odo(t, fn, header=header, delimiter=sep)
with open(fn, 'rt') as f:
lines = f.readlines()
expected = [tuple(map(int, row))
for row in map(lambda x: x.split(sep), lines[header:])]
assert odo(fn, list, delimiter=sep, has_header=header,
dshape=discover(t)) == expected
def test_different_encoding():
encoding = 'latin1'
with tmpfile('db') as db:
sql = odo(os.path.join(os.path.dirname(__file__), 'encoding.csv'),
'sqlite:///%s::t' % db, encoding=encoding)
result = odo(sql, list)
NULL = u'' if 'windows' not in platform().lower() else None
expected = [(u'1958.001.500131-1A', 1, NULL, NULL, 899),
(u'1958.001.500156-6', 1, NULL, NULL, 899),
(u'1958.001.500162-1', 1, NULL, NULL, 899),
(u'1958.001.500204-2', 1, NULL, NULL, 899),
(u'1958.001.500204-2A', 1, NULL, NULL, 899),
(u'1958.001.500204-2B', 1, NULL, NULL, 899),
(u'1958.001.500223-6', 1, NULL, NULL, 9610),
(u'1958.001.500233-9', 1, NULL, NULL, 4703),
(u'1909.017.000018-3', 1, 30.0, u'sumaria', 899)]
assert result == expected
@pytest.yield_fixture
def quoted_sql():
with tmpfile('.db') as db:
try:
yield resource('sqlite:///%s::foo bar' % db, dshape=ds)
except sa.exc.OperationalError as e:
pytest.skip(str(e))
@pytest.mark.xfail(
raises=sa.exc.DatabaseError,
reason='How do you use a quoted table name with the SQLite .import command?'
)
def test_quoted_name(csv, quoted_sql):
with tmpfile('csv') as filename:
csv = odo(data, filename, dshape=ds, has_header=True)
s = odo(csv, quoted_sql)
t = odo(csv, list)
assert sorted(odo(s, list)) == sorted(t)
def test_different_encoding_to_csv():
with tmpfile('db') as dbfilename:
with filetext('a,b\n1,2\n3,4', extension='csv') as csvfilename:
t = odo(
csvfilename,
'sqlite:///%s::mytable' % dbfilename,
encoding='latin1'
)
with tmpfile('.csv') as fn:
with pytest.raises(ValueError):
odo(t, fn, encoding='latin1')
def test_send_parameterized_query_to_csv():
with tmpfile('db') as dbfilename:
with filetext('a,b\n1,2\n3,4', extension='csv') as csvfilename:
t = odo(
csvfilename,
'sqlite:///%s::mytable' % dbfilename,
)
with tmpfile('.csv') as fn:
q = t.select(t.c.a == 1)
r = odo(q, fn)
assert sorted(odo(q, list)) == sorted(odo(r, list))
| {
"repo_name": "cpcloud/odo",
"path": "odo/backends/tests/test_sqlite.py",
"copies": "1",
"size": "5015",
"license": "bsd-3-clause",
"hash": 949670358277004900,
"line_mean": 33.8263888889,
"line_max": 80,
"alpha_frac": 0.5411764706,
"autogenerated": false,
"ratio": 3.361260053619303,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9399092078455875,
"avg_score": 0.000668889152685449,
"num_lines": 144
} |
from __future__ import absolute_import, division, print_function
import os
from ply.lex import TOKEN, lex
__all__ = ['create_lexer', 'tokens']
keywords = {'BEGIN', 'END', 'INPUT', 'OUTPUT'}
tokens = [
# with value.
'BOOLEAN',
'ID',
'SHORT_STRING',
'LONG_STRING',
'FLOAT_NUMBER',
'DECIMAL_INTEGER',
# without value.
'EQUAL_SIGN',
'COMMA',
'COLON',
'L_BRACKET',
'R_BRACKET',
'L_BRACE',
'R_BRACE',
]
tokens = tokens + list(keywords)
# basic elements.
BOOLEAN_TRUE = r'true'
BOOLEAN_FALSE = r'false'
SIGNS = r'[-+]'
DIGIT = r'\d'
NON_ZERO_DIGIT = r'[1-9]'
ALPHABET = r'[a-zA-Z_]'
NON_SPACE = r'\S'
DOUBLE_QUOTE = r'"'
TRIPLE_DOUBLE_QUOTE = r'"""'
SHORT_STRING_CHAR = r'[^"\n\\]'
LONG_STRING_CHAR = r'[^\\]'
ESCAPE_SEQ = r'\\.'
# tokens with value.
r_BOOLEAN = r'{0}|{1}'.format(
BOOLEAN_TRUE, BOOLEAN_FALSE,
)
r_ID = r'{0}({0}|{1})*'.format(
ALPHABET, DIGIT,
)
r_SHORT_STRING = r'{0}({1}|{2})*?{0}'.format(
DOUBLE_QUOTE, SHORT_STRING_CHAR, ESCAPE_SEQ,
)
r_LONG_STRING = r'{0}({1}|{2})*?{0}'.format(
    TRIPLE_DOUBLE_QUOTE, LONG_STRING_CHAR, ESCAPE_SEQ,
)
r_FLOAT_NUMBER = r'{0}?({1}+(\.){1}+)'.format(
SIGNS, DIGIT,
)
r_DECIMAL_INTEGER = r'{0}?(({1}{2}*)|0)'.format(
SIGNS, NON_ZERO_DIGIT, DIGIT,
)
@TOKEN(r_BOOLEAN)
def t_BOOLEAN(t):
t.value = True if t.value == BOOLEAN_TRUE else False
return t
@TOKEN(r_ID)
def t_ID(t):
keyword_value = t.value.upper()
    t.type = keyword_value if keyword_value in keywords else 'ID'
return t
@TOKEN(r_LONG_STRING)
def t_LONG_STRING(t):
    # remove TRIPLE_DOUBLE_QUOTE.
chars = list(t.value[3:-3])
# remove leading and tailing newline character.
NEWLINE = '\n'
if chars[0] == NEWLINE:
chars.pop(0)
if chars[-1] == NEWLINE:
chars.pop()
t.value = ''.join(chars)
return t
@TOKEN(r_SHORT_STRING)
def t_SHORT_STRING(t):
# remove DOUBLE_QUOTE.
t.value = t.value[1:-1]
return t
@TOKEN(r_FLOAT_NUMBER)
def t_FLOAT_NUMBER(t):
t.value = float(t.value)
return t
@TOKEN(r_DECIMAL_INTEGER)
def t_DECIMAL_INTEGER(t):
t.value = int(t.value)
return t
# tokens without value.
t_EQUAL_SIGN = r'='
t_COMMA = r','
t_COLON = r':'
t_L_BRACKET = r'\['
t_R_BRACKET = r'\]'
t_L_BRACE = r'\{'
t_R_BRACE = r'\}'
t_ignore_COMMENT = r'\#.*'
t_ignore_WHITESPACE = r'\s'
# misc configurations.
def t_newline(t):
r'\n+'
t.lexer.lineno += len(t.value)
def t_error(t):
print("Lexer Error: [{0}]{1}".format(t.value[0], t.value[1:]))
t.lexer.skip(1)
def create_lexer():
return lex(
debug=0,
optimize=1,
lextab="generated_lextab",
outputdir=os.path.dirname(__file__),
)
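# --- Hedged usage sketch (added for illustration; not part of the original
# module).  PLY lexers are driven by feeding text to ``input()`` and pulling
# tokens by iteration; the sample input below is made up.
if __name__ == '__main__':
    demo_lexer = create_lexer()
    demo_lexer.input('BEGIN x = 3.14 END')
    for tok in demo_lexer:
        # Expected token types: BEGIN, ID, EQUAL_SIGN, FLOAT_NUMBER, END
        print(tok.type, tok.value)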
| {
"repo_name": "huntzhan/tcg",
"path": "tcg/ast/lexer.py",
"copies": "1",
"size": "2722",
"license": "mit",
"hash": 3271167666811264000,
"line_mean": 18.034965035,
"line_max": 66,
"alpha_frac": 0.5808229243,
"autogenerated": false,
"ratio": 2.632495164410058,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3713318088710058,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import os
from six import moves
import ssl
import tflearn
from tflearn.data_utils import *
path = "../../data/US_Cities.txt"
if not os.path.isfile(path):
context = ssl._create_unverified_context()
moves.urllib.request.urlretrieve("https://raw.githubusercontent.com/tflearn/tflearn.github.io/master/resources/US_Cities.txt", path, context=context)
maxlen = 20
string_utf8 = open(path, "r").read()
X, Y, char_idx = \
string_to_semi_redundant_sequences(string_utf8, seq_maxlen=maxlen, redun_step=3)
g = tflearn.input_data(shape=[None, maxlen, len(char_idx)])
g = tflearn.lstm(g, 512, return_seq=True)
g = tflearn.dropout(g, 0.5)
g = tflearn.lstm(g, 512)
g = tflearn.dropout(g, 0.5)
g = tflearn.fully_connected(g, len(char_idx), activation='softmax')
g = tflearn.regression(g, optimizer='adam', loss='categorical_crossentropy',
learning_rate=0.001)
m = tflearn.SequenceGenerator(g, dictionary=char_idx,
seq_maxlen=maxlen,
clip_gradients=5.0,
checkpoint_path='lgc-ckpt/model_us_cities')
for i in range(40):
seed = random_sequence_from_string(string_utf8, maxlen)
m.fit(X, Y, validation_set=0.1, batch_size=128,
n_epoch=1, run_id='us_cities')
print("-- TESTING...")
print("-- Test with temperature of 1.2 --")
print(m.generate(30, temperature=1.2, seq_seed=seed).encode('utf-8'))
print("-- Test with temperature of 1.0 --")
print(m.generate(30, temperature=1.0, seq_seed=seed).encode('utf-8'))
print("-- Test with temperature of 0.5 --")
print(m.generate(30, temperature=0.5, seq_seed=seed).encode('utf-8'))
| {
"repo_name": "hashware/tflearn-learn",
"path": "examples/nlp/lstm_generator_cityname.py",
"copies": "1",
"size": "1746",
"license": "mit",
"hash": 3024734390405894000,
"line_mean": 37.8,
"line_max": 153,
"alpha_frac": 0.6512027491,
"autogenerated": false,
"ratio": 3.117857142857143,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9267031201693159,
"avg_score": 0.00040573805279687635,
"num_lines": 45
} |
from __future__ import absolute_import, division, print_function
import os
from time import ctime
from qtpy import QtWidgets
from glue import core
from glue.utils.qt import load_ui
class MessageWidget(QtWidgets.QWidget, core.hub.HubListener):
""" This simple class displays all messages broadcast
by a hub. It is mainly intended for debugging """
def __init__(self):
QtWidgets.QWidget.__init__(self)
self.ui = load_ui('message_widget.ui', self,
directory=os.path.dirname(__file__))
self.ui.messageTable.setColumnCount(3)
labels = ['Time', 'Message', 'Sender']
self.ui.messageTable.setHorizontalHeaderLabels(labels)
def register_to_hub(self, hub):
# catch all messages
hub.subscribe(self, core.message.Message,
handler=self.process_message,
filter=lambda x: True)
def process_message(self, message):
        row = 0  # new messages are always inserted at the top row
self.ui.messageTable.insertRow(0)
tm = QtWidgets.QTableWidgetItem(ctime().split()[3])
typ = str(type(message)).split("'")[-2].split('.')[-1]
mtyp = QtWidgets.QTableWidgetItem(typ)
typ = str(type(message.sender)).split("'")[-2].split('.')[-1]
sender = QtWidgets.QTableWidgetItem(typ)
self.ui.messageTable.setItem(row, 0, tm)
self.ui.messageTable.setItem(row, 1, mtyp)
self.ui.messageTable.setItem(row, 2, sender)
self.ui.messageTable.resizeColumnsToContents()
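# --- Hedged usage sketch (added for illustration; not part of the original
# module).  The widget only becomes useful once it is registered with a glue
# Hub inside a running Qt application; the snippet below assumes such an
# application already exists:
#
#     from glue.core import Hub
#     hub = Hub()
#     widget = MessageWidget()
#     widget.register_to_hub(hub)
#     widget.show()
#
# Any message subsequently broadcast on the hub is appended to the table.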
| {
"repo_name": "stscieisenhamer/glue",
"path": "glue/core/qt/message_widget.py",
"copies": "5",
"size": "1541",
"license": "bsd-3-clause",
"hash": -4403802043117371000,
"line_mean": 37.525,
"line_max": 69,
"alpha_frac": 0.6378974692,
"autogenerated": false,
"ratio": 3.871859296482412,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.7009756765682411,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import os
import argparse
import torch
from envs import create_atari_env
from model import ES
from train import train_loop, render_env
parser = argparse.ArgumentParser(description='ES')
parser.add_argument('--env-name', default='PongDeterministic-v4',
metavar='ENV', help='environment')
parser.add_argument('--lr', type=float, default=0.1, metavar='LR',
help='learning rate')
parser.add_argument('--lr-decay', type=float, default=1, metavar='LRD',
help='learning rate decay')
parser.add_argument('--sigma', type=float, default=0.05, metavar='SD',
help='noise standard deviation')
parser.add_argument('--useAdam', action='store_true',
help='bool to determine if to use adam optimizer')
parser.add_argument('--n', type=int, default=40, metavar='N',
help='batch size, must be even')
parser.add_argument('--max-episode-length', type=int, default=100000,
metavar='MEL', help='maximum length of an episode')
parser.add_argument('--max-gradient-updates', type=int, default=100000,
metavar='MGU', help='maximum number of updates')
parser.add_argument('--restore', default='', metavar='RES',
help='checkpoint from which to restore')
parser.add_argument('--small-net', action='store_true',
help='Use simple MLP on CartPole')
parser.add_argument('--variable-ep-len', action='store_true',
help="Change max episode length during training")
parser.add_argument('--silent', action='store_true',
help='Silence print statements during training')
parser.add_argument('--test', action='store_true',
help='Just render the env, no training')
if __name__ == '__main__':
args = parser.parse_args()
assert args.n % 2 == 0
if args.small_net and args.env_name not in ['CartPole-v0', 'CartPole-v1',
'MountainCar-v0']:
args.env_name = 'CartPole-v1'
print('Switching env to CartPole')
env = create_atari_env(args.env_name)
chkpt_dir = 'checkpoints/%s/' % args.env_name
if not os.path.exists(chkpt_dir):
os.makedirs(chkpt_dir)
synced_model = ES(env.observation_space.shape[0],
env.action_space, args.small_net)
for param in synced_model.parameters():
param.requires_grad = False
if args.restore:
state_dict = torch.load(args.restore)
synced_model.load_state_dict(state_dict)
if args.test:
render_env(args, synced_model, env)
else:
train_loop(args, synced_model, env, chkpt_dir)
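    # --- Hedged usage note (added; not part of the original script). ---
    # Typical invocations, matching the argparse flags defined above (the
    # checkpoint filename is hypothetical):
    #
    #     python main.py --env-name CartPole-v1 --small-net --n 40
    #     python main.py --restore checkpoints/PongDeterministic-v4/latest.pth --test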
| {
"repo_name": "atgambardella/pytorch-es",
"path": "main.py",
"copies": "1",
"size": "2760",
"license": "mit",
"hash": -5320085673543132000,
"line_mean": 41.4615384615,
"line_max": 77,
"alpha_frac": 0.6184782609,
"autogenerated": false,
"ratio": 3.786008230452675,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9901637488503672,
"avg_score": 0.0005698005698005698,
"num_lines": 65
} |
from __future__ import (absolute_import, division, print_function)
import os
import contextlib
import pytest
import numpy as np
from netCDF4 import Dataset
from gridded.pyugrid.ugrid import UGrid
from gridded.pyugrid.grid_io import load_from_varnames
# @pytest.fixture
@contextlib.contextmanager
def non_compliant_mesh(fname):
"""
Dummy file based on:
https://gnome.orr.noaa.gov/py_gnome_testdata/COOPSu_CREOFS.nc
"""
nc = Dataset(fname, 'w', diskless=True)
nc.grid_type = 'Triangular'
nc.createDimension('nbi', 4)
nc.createDimension('three', 3)
nc.createDimension('nbnd', 5443)
nc.createDimension('node', 74061)
nc.createDimension('nele', 142684)
bnd = nc.createVariable('bnd', 'i4', dimensions=('nbnd', 'nbi'))
bnd[:] = np.random.random((5443, 4))
lon = nc.createVariable('lon', 'f4', dimensions=('node'))
lon[:] = np.random.random((74061))
lat = nc.createVariable('lat', 'f4', dimensions=('node'))
lat[:] = np.random.random((74061))
nbe = nc.createVariable('nbe', 'i4', dimensions=('three', 'nele'))
nbe.order = 'ccw'
nbe[:] = np.random.random((3, 142684))
nv = nc.createVariable('nv', 'i4', dimensions=('three', 'nele'))
nv[:] = np.random.random((3, 142684))
try:
yield nc
finally:
nc.close()
def test_load_from_varnames_good_mapping():
mapping = {'attribute_check': ('grid_type', 'triangular'),
'faces': 'nv',
'nodes_lon': 'lon',
'nodes_lat': 'lat',
'boundaries': 'bnd',
'face_face_connectivity': 'nbe'}
fname = 'non_compliant_ugrid.nc'
with non_compliant_mesh(fname) as ds:
ug = load_from_varnames(ds, mapping)
assert isinstance(ug, UGrid)
def test_load_from_varnames_bad_mapping():
mapping = {'attribute_check': ('grid_type', 'triangular'),
'faces': 'nv',
'nodes_lon': 'longitude',
'nodes_lat': 'latitude',
'boundaries': 'bnd',
'face_face_connectivity': 'nbe'}
fname = 'non_compliant_ugrid.nc'
with non_compliant_mesh(fname) as ds:
with pytest.raises(KeyError):
load_from_varnames(ds, mapping)
| {
"repo_name": "NOAA-ORR-ERD/gridded",
"path": "gridded/tests/test_ugrid/test_io_util.py",
"copies": "1",
"size": "2237",
"license": "unlicense",
"hash": -1073469762407732700,
"line_mean": 28.0519480519,
"line_max": 70,
"alpha_frac": 0.5994635673,
"autogenerated": false,
"ratio": 3.289705882352941,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9322758587787534,
"avg_score": 0.013282172373081463,
"num_lines": 77
} |
from __future__ import absolute_import, division, print_function
import os
import copy
import fnmatch
import warnings
from glob import glob
from io import BytesIO
try:
from HTMLParser import HTMLParser
except ImportError:
from html.parser import HTMLParser
from datetime import datetime, timedelta
try:
from urllib import urlopen
except ImportError:
from urllib.request import urlopen
# Scientific stack.
import pytz
import numpy as np
from owslib import fes
from owslib.ows import ExceptionReport
from owslib.swe.sensor.sml import SensorML
from pandas import Panel, DataFrame, read_csv, concat
from netCDF4 import Dataset, MFDataset, date2index, num2date
import iris
from iris.pandas import as_data_frame
import requests
from lxml import etree
from bs4 import BeautifulSoup
# Local.
from .pytools import url_lister, parse_url
iris.FUTURE.netcdf_promote = True
iris.FUTURE.cell_datetime_objects = True
__all__ = ['get_model_name',
'secoora2df',
'secoora_buoys',
'load_secoora_ncs',
'fes_date_filter',
'service_urls',
'collector2table',
'sos_request',
'get_ndbc_longname',
'get_coops_metadata',
'pyoos2df',
'ndbc2df',
'nc2df',
'CF_names',
'titles',
'fix_url',
'fetch_range',
'start_log',
'is_station']
salinity = ['sea_water_salinity',
'sea_surface_salinity',
'sea_water_absolute_salinity',
'sea_water_practical_salinity']
temperature = ['sea_water_temperature',
'sea_surface_temperature',
'sea_water_potential_temperature',
'equivalent_potential_temperature',
'sea_water_conservative_temperature',
'pseudo_equivalent_potential_temperature']
water_level = ['sea_surface_height',
'sea_surface_elevation',
'sea_surface_height_above_geoid',
'sea_surface_height_above_sea_level',
'water_surface_height_above_reference_datum',
'sea_surface_height_above_reference_ellipsoid']
speed_direction = ['sea_water_speed', 'direction_of_sea_water_velocity']
u = ['surface_eastward_sea_water_velocity',
'eastward_sea_water_velocity',
'sea_water_x_velocity',
'x_sea_water_velocity',
'eastward_transformed_eulerian_mean_velocity',
'eastward_sea_water_velocity_assuming_no_tide']
v = ['northward_sea_water_velocity',
'surface_northward_sea_water_velocity',
'sea_water_y_velocity',
'y_sea_water_velocity',
'northward_transformed_eulerian_mean_velocity',
'northward_sea_water_velocity_assuming_no_tide']
"""
'surface_geostrophic_sea_water_x_velocity',
'surface_geostrophic_sea_water_y_velocity'
'surface_geostrophic_eastward_sea_water_velocity',
'surface_geostrophic_northward_sea_water_velocity',
'baroclinic_eastward_sea_water_velocity',
'baroclinic_northward_sea_water_velocity',
'barotropic_eastward_sea_water_velocity',
'barotropic_northward_sea_water_velocity',
'barotropic_sea_water_x_velocity',
'barotropic_sea_water_y_velocity',
'bolus_eastward_sea_water_velocity',
'bolus_northward_sea_water_velocity',
'bolus_sea_water_x_velocity',
'bolus_sea_water_y_velocity',
'surface_eastward_geostrophic_sea_water_velocity',
'surface_northward_geostrophic_sea_water_velocity',
'surface_geostrophic_sea_water_x_velocity_assuming_sea_level_for_geoid',
'surface_geostrophic_sea_water_y_velocity_assuming_sea_level_for_geoid',
'surface_geostrophic_eastward_sea_water_velocity_assuming_sea_level_for_geoid',
'surface_geostrophic_northward_sea_water_velocity_assuming_sea_level_for_geoid',
'surface_eastward_geostrophic_sea_water_velocity_assuming_sea_level_for_geoid',
'surface_northward_geostrophic_sea_water_velocity_assuming_sea_level_for_geoid'
"""
CF_names = dict({'salinity': salinity,
'sea_water_temperature': temperature,
'currents': dict(u=u, v=v, speed_direction=speed_direction),
'water_surface_height_above_reference_datum': water_level})
titles = dict(SABGOM='http://omgsrv1.meas.ncsu.edu:8080/thredds/dodsC/fmrc/sabgom/SABGOM_Forecast_Model_Run_Collection_best.ncd', # noqa
SABGOM_ARCHIVE='http://omgarch1.meas.ncsu.edu:8080/thredds/dodsC/fmrc/sabgom/SABGOM_Forecast_Model_Run_Collection_best.ncd', # noqa
USEAST='http://omgsrv1.meas.ncsu.edu:8080/thredds/dodsC/fmrc/us_east/US_East_Forecast_Model_Run_Collection_best.ncd', # noqa
COAWST_4='http://geoport.whoi.edu/thredds/dodsC/coawst_4/use/fmrc/coawst_4_use_best.ncd', # noqa
ESPRESSO='http://tds.marine.rutgers.edu/thredds/dodsC/roms/espresso/2013_da/his_Best/ESPRESSO_Real-Time_v2_History_Best_Available_best.ncd', # noqa
BTMPB='http://oos.soest.hawaii.edu/thredds/dodsC/hioos/tide_pac', # noqa
TBOFS='http://opendap.co-ops.nos.noaa.gov/thredds/dodsC/TBOFS/fmrc/Aggregated_7_day_TBOFS_Fields_Forecast_best.ncd', # noqa
HYCOM='http://oos.soest.hawaii.edu/thredds/dodsC/pacioos/hycom/global', # noqa
CBOFS='http://opendap.co-ops.nos.noaa.gov/thredds/dodsC/CBOFS/fmrc/Aggregated_7_day_CBOFS_Fields_Forecast_best.ncd', # noqa
ESTOFS='http://geoport-dev.whoi.edu/thredds/dodsC/estofs/atlantic', # noqa
NECOFS_GOM3_FVCOM='http://www.smast.umassd.edu:8080/thredds/dodsC/FVCOM/NECOFS/Forecasts/NECOFS_GOM3_FORECAST.nc', # noqa
NECOFS_GOM3_WAVE='http://www.smast.umassd.edu:8080/thredds/dodsC/FVCOM/NECOFS/Forecasts/NECOFS_WAVE_FORECAST.nc', # noqa
USF_ROMS='http://crow.marine.usf.edu:8080/thredds/dodsC/WFS_ROMS_NF_model/USF_Ocean_Circulation_Group_West_Florida_Shelf_Daily_ROMS_Nowcast_Forecast_Model_Data_best.ncd', # noqa
USF_SWAN='http://crow.marine.usf.edu:8080/thredds/dodsC/WFS_SWAN_NF_model/USF_Ocean_Circulation_Group_West_Florida_Shelf_Daily_SWAN_Nowcast_Forecast_Wave_Model_Data_best.ncd', # noqa
USF_FVCOM='http://crow.marine.usf.edu:8080/thredds/dodsC/FVCOM-Nowcast-Agg.nc' # noqa
)
def fix_url(start, url):
"""
If dates are older than 30 days switch URL prefix to archive.
NOTE: start must be non-naive datetime object.
Examples
--------
>>> from datetime import datetime
>>> import pytz
>>> start = datetime(2010, 1, 1).replace(tzinfo=pytz.utc)
>>> url = ('http://omgsrv1.meas.ncsu.edu:8080/thredds/dodsC/fmrc/'
... 'sabgom/SABGOM_Forecast_Model_Run_Collection_best.ncd')
>>> new_url = fix_url(start, url)
>>> new_url.split('/')[2]
'omgarch1.meas.ncsu.edu:8080'
"""
diff = (datetime.utcnow().replace(tzinfo=pytz.utc)) - start
if diff > timedelta(days=30):
url = url.replace('omgsrv1', 'omgarch1')
return url
def _remove_parenthesis(word):
"""
Examples
--------
>>> _remove_parenthesis("(ROMS)")
'ROMS'
"""
try:
return word[word.index("(") + 1:word.rindex(")")]
except ValueError:
return word
def _guess_name(model_full_name):
"""
Examples
--------
>>> some_names = ['USF FVCOM - Nowcast Aggregation',
... 'ROMS/TOMS 3.0 - New Floria Shelf Application',
... 'COAWST Forecast System : USGS : US East Coast and Gulf'
... 'of Mexico (Experimental)',
... 'ROMS/TOMS 3.0 - South-Atlantic Bight and Gulf of'
... 'Mexico',
... 'HYbrid Coordinate Ocean Model (HYCOM): Global',
... 'ROMS ESPRESSO Real-Time Operational IS4DVAR Forecast'
... 'System Version 2 (NEW) 2013-present FMRC History'
... '(Best)']
>>> [_guess_name(model_full_name) for model_full_name in some_names]
['USF_FVCOM', 'ROMS/TOMS', 'COAWST_USGS', 'ROMS/TOMS', 'HYCOM', \
'ROMS_ESPRESSO']
"""
words = []
for word in model_full_name.split():
if word.isupper():
words.append(_remove_parenthesis(word))
mod_name = ' '.join(words)
if not mod_name:
mod_name = ''.join([c for c in model_full_name.split('(')[0]
if c.isupper()])
if len(mod_name.split()) > 1:
mod_name = '_'.join(mod_name.split()[:2])
return mod_name
def _sanitize(name):
"""
Examples
--------
>>> _sanitize('ROMS/TOMS')
'ROMS_TOMS'
>>> _sanitize('USEAST model')
'USEAST_model'
>>> _sanitize('GG1SST, SST')
'GG1SST_SST'
"""
name = name.replace(', ', '_')
name = name.replace('/', '_')
name = name.replace(' ', '_')
name = name.replace(',', '_')
return name
def get_model_name(cube, url):
"""
Return a model short and long name from a cube.
Examples
--------
>>> import iris
>>> import warnings
>>> url = ('http://omgsrv1.meas.ncsu.edu:8080/thredds/dodsC/fmrc/sabgom/'
... 'SABGOM_Forecast_Model_Run_Collection_best.ncd')
>>> with warnings.catch_warnings():
... warnings.simplefilter("ignore") # Suppress iris warnings.
... cube = iris.load_cube(url, "sea_water_potential_temperature")
>>> get_model_name(cube, url)
('SABGOM', 'ROMS/TOMS 3.0 - South-Atlantic Bight and Gulf of Mexico')
"""
url = parse_url(url)
# [model_full_name]: if there is no title assign the URL.
try:
model_full_name = cube.attributes.get('title', url)
except AttributeError:
model_full_name = url
# [mod_name]: first searches the titles dictionary, if not try to guess.
for mod_name, uri in titles.items():
if url == uri:
return mod_name, model_full_name
warnings.warn('Model %s not in the list. Guessing' % url)
mod_name = _guess_name(model_full_name)
mod_name = _sanitize(mod_name)
return mod_name, model_full_name
def _extract_columns(name, cube):
"""
Workaround to extract data from a cube and create a dataframe
following SOS boilerplate.
"""
station = cube.attributes.get('abstract', None)
if not station:
station = name.replace('.', '_')
parser = HTMLParser()
station = parser.unescape(station)
sensor = 'NA'
lon = cube.coord(axis='X').points[0]
lat = cube.coord(axis='Y').points[0]
time = cube.coord(axis='T')
time = time.units.num2date(cube.coord(axis='T').points)[0]
    date_time = time.strftime('%Y-%m-%dT%H:%M:%SZ')
data = cube.data.mean()
return station, sensor, lat, lon, date_time, data
def secoora2df(buoys, varname):
"""
This function assumes a global cube object.
    FIXME: Consider removing this from the package and adding it directly
    in the notebook for clarity.
"""
secoora_obs = dict()
for station, cube in buoys.items():
secoora_obs.update({station: _extract_columns(station, cube)})
df = DataFrame.from_dict(secoora_obs, orient='index')
df.reset_index(inplace=True)
columns = {'index': 'station',
0: 'name',
1: 'sensor',
2: 'lat',
3: 'lon',
4: 'date_time',
5: varname}
df.rename(columns=columns, inplace=True)
df.set_index('name', inplace=True)
return df
def is_station(url):
"""
    Return True if `cdm_data_type` exists and is equal to 'station'.
Examples
--------
>>> url = ('http://thredds.cdip.ucsd.edu/thredds/dodsC/'
... 'cdip/archive/144p1/144p1_historic.nc')
>>> is_station(url)
True
>>> url = ("http://comt.sura.org/thredds/dodsC/data/comt_1_archive/"
... "inundation_tropical/VIMS_SELFE/"
... "Hurricane_Ike_2D_final_run_without_waves")
>>> is_station(url)
False
"""
nc = Dataset(url)
station = False
if hasattr(nc, 'cdm_data_type'):
if nc.cdm_data_type.lower() == 'station':
station = True
return station
def _load_nc(nc):
if isinstance(nc, Dataset):
return nc
else:
return Dataset(nc)
def source_of_data(nc, coverage_content_type='modelResult'):
"""
    Check whether any variable in the netCDF dataset has the given
    `coverage_content_type`.
    The `coverage_content_type` is an ISO 19115-1 code indicating the
    source of the data and can be one of the following:
image, thematicClassification, physicalMeasurement, auxiliaryInformation,
qualityInformation, referenceInformation, modelResult, coordinate
Examples
--------
>>> url = ('http://comt.sura.org/thredds/dodsC/data/comt_1_archive/'
... 'inundation_tropical/VIMS_SELFE/'
... 'Hurricane_Ike_2D_final_run_without_waves')
>>> nc = Dataset(url)
>>> source_of_data(nc)
True
>>> url = ('http://thredds.axiomdatascience.com/thredds/'
... 'dodsC/G1_SST_GLOBAL.nc')
>>> source_of_data(url) # False positive!
True
OBS: `source_of_data` assumes that the presence of one
    `coverage_content_type` variable means the whole Dataset **is** the same
`coverage_content_type`!
"""
nc = _load_nc(nc)
if nc.get_variables_by_attributes(coverage_content_type=coverage_content_type): # noqa
return True
return False
def is_model(nc):
"""
Heuristic way to find if a netCDF4 object is "modelResult" or not.
WARNING: This function may return False positives and False
negatives!!!
Examples
--------
>>> models = ['http://omgsrv1.meas.ncsu.edu:8080/thredds/dodsC/fmrc/sabgom/SABGOM_Forecast_Model_Run_Collection_best.ncd',
... 'http://crow.marine.usf.edu:8080/thredds/dodsC/FVCOM-Nowcast-Agg.nc',
... 'http://geoport.whoi.edu/thredds/dodsC/coawst_4/use/fmrc/coawst_4_use_best.ncd',
... 'http://oos.soest.hawaii.edu/thredds/dodsC/hioos/tide_pac',
... 'http://opendap.co-ops.nos.noaa.gov/thredds/dodsC/TBOFS/fmrc/Aggregated_7_day_TBOFS_Fields_Forecast_best.ncd',
... 'http://oos.soest.hawaii.edu/thredds/dodsC/pacioos/hycom/global',
... 'http://opendap.co-ops.nos.noaa.gov/thredds/dodsC/CBOFS/fmrc/Aggregated_7_day_CBOFS_Fields_Forecast_best.ncd',
... 'http://geoport-dev.whoi.edu/thredds/dodsC/estofs/atlantic',
... 'http://www.smast.umassd.edu:8080/thredds/dodsC/FVCOM/NECOFS/Forecasts/NECOFS_WAVE_FORECAST.nc',
... 'http://www.smast.umassd.edu:8080/thredds/dodsC/FVCOM/NECOFS/Forecasts/NECOFS_GOM3_FORECAST.nc'] # noqa
>>> all([is_model(url) for url in models])
True
>>> not_model = ['http://thredds.cdip.ucsd.edu/thredds/dodsC/cdip/archive/043p1/043p1_d17.nc',
... 'http://thredds.axiomalaska.com/thredds/dodsC/Aquarius_V3_SSS_Daily.nc',
... 'http://thredds.axiomalaska.com/thredds/dodsC/Aquarius_V3_scat_wind_speed_Weekly.nc',
... 'http://thredds.axiomdatascience.com/thredds/dodsC/G1_SST_GLOBAL.nc']
>>> any([is_model(url) for url in not_model])
False
"""
nc = _load_nc(nc)
# First criteria (Strong): `UGRID/SGRID`
if hasattr(nc, 'Conventions'):
if 'ugrid' in nc.Conventions.lower():
return True
if hasattr(nc, 'Conventions'):
if 'sgrid' in nc.Conventions.lower():
return True
# Second criteria (Strong): dimensionless coords are present.
vs = nc.get_variables_by_attributes(formula_terms=lambda v: v is not None)
if vs:
return True
# Third criteria (weak): Assumes that all "GRID" attribute are models.
cdm_data_type = nc.getncattr('cdm_data_type') if hasattr(nc, 'cdm_data_type') else '' # noqa
feature_type = nc.getncattr('featureType') if hasattr(nc, 'featureType') else '' # noqa
grid, keyword, title = False, False, False
grid = any([info.lower() == 'grid' for info in [cdm_data_type, feature_type]]) # noqa
words = ['pom', 'hycom', 'fvcom', 'roms', 'numerical',
'simulation', 'Circulation Models']
if hasattr(nc, 'keywords'):
keyword = any(word in nc.getncattr('keywords') for word in words)
if hasattr(nc, 'title'):
title = any(word in nc.getncattr('title') for word in words)
if any([title, keyword]) and grid:
return True
return False
def secoora_buoys():
"""
Returns a generator with secoora catalog_platforms URLs.
Examples
---------
>>> import types
>>> try:
... from urlparse import urlparse
... except ImportError:
... from urllib.parse import urlparse
>>> buoys = secoora_buoys()
>>> isinstance(buoys, types.GeneratorType)
True
>>> url = list(buoys)[0]
>>> bool(urlparse(url).scheme)
True
"""
thredds = "http://129.252.139.124/thredds/catalog_platforms.html"
urls = url_lister(thredds)
base_url = "http://129.252.139.124/thredds/dodsC"
for buoy in urls:
if (("?dataset=" in buoy) and
('archive' not in buoy) and
('usf.c12.weatherpak' not in buoy) and
('cormp.ocp1.buoy' not in buoy)):
try:
buoy = buoy.split('id_')[1]
except IndexError:
buoy = buoy.split('=')[1]
if buoy.endswith('.nc'):
buoy = buoy[:-3]
url = '{}/{}.nc'.format(base_url, buoy)
yield url
def _secoora_buoys():
"""
TODO: BeautifulSoup alternative.
"""
from bs4 import BeautifulSoup
thredds = "http://129.252.139.124/thredds/catalog_platforms.html"
connection = urlopen(thredds)
page = connection.read()
connection.close()
soup = BeautifulSoup(page, "lxml")
base_url = "http://129.252.139.124/thredds/dodsC"
for a in soup.find_all("a"):
href = a.get('href')
if "?dataset=" in href:
buoy = a.next_element.string
url = '{}/{}.nc.html'.format(base_url, buoy)
yield url
def load_secoora_ncs(run_name):
"""
Loads local files using the run_name date.
NOTE: Consider moving this inside the notebook.
"""
fname = '{}-{}.nc'.format
OBS_DATA = nc2df(os.path.join(run_name,
fname(run_name, 'OBS_DATA')))
SECOORA_OBS_DATA = nc2df(os.path.join(run_name,
fname(run_name, 'SECOORA_OBS_DATA')))
ALL_OBS_DATA = concat([OBS_DATA, SECOORA_OBS_DATA], axis=1)
index = ALL_OBS_DATA.index
dfs = dict(OBS_DATA=ALL_OBS_DATA)
for fname in glob(os.path.join(run_name, "*.nc")):
if 'OBS_DATA' in fname:
continue
else:
model = fname.split('.')[0].split('-')[-1]
df = nc2df(fname)
# FIXME: Horrible work around duplicate times.
if len(df.index.values) != len(np.unique(df.index.values)):
kw = dict(subset='index', keep='last')
df = df.reset_index().drop_duplicates(**kw).set_index('index')
kw = dict(method='time', limit=30)
df = df.reindex(index).interpolate(**kw).ix[index]
dfs.update({model: df})
return Panel.fromDict(dfs).swapaxes(0, 2)
def fes_date_filter(start, stop, constraint='overlaps'):
"""
    Takes datetime-like objects and returns a fes filter for the date range
    (begin and end inclusive).
NOTE: Truncates the minutes!!!
Examples
--------
>>> from datetime import datetime, timedelta
>>> stop = datetime(2010, 1, 1, 12, 30, 59).replace(tzinfo=pytz.utc)
>>> start = stop - timedelta(days=7)
>>> begin, end = fes_date_filter(start, stop, constraint='overlaps')
>>> begin.literal, end.literal
('2010-01-01 12:00', '2009-12-25 12:00')
>>> begin.propertyoperator, end.propertyoperator
('ogc:PropertyIsLessThanOrEqualTo', 'ogc:PropertyIsGreaterThanOrEqualTo')
>>> begin, end = fes_date_filter(start, stop, constraint='within')
>>> begin.literal, end.literal
('2009-12-25 12:00', '2010-01-01 12:00')
>>> begin.propertyoperator, end.propertyoperator
('ogc:PropertyIsGreaterThanOrEqualTo', 'ogc:PropertyIsLessThanOrEqualTo')
"""
start = start.strftime('%Y-%m-%d %H:00')
stop = stop.strftime('%Y-%m-%d %H:00')
if constraint == 'overlaps':
propertyname = 'apiso:TempExtent_begin'
begin = fes.PropertyIsLessThanOrEqualTo(propertyname=propertyname,
literal=stop)
propertyname = 'apiso:TempExtent_end'
end = fes.PropertyIsGreaterThanOrEqualTo(propertyname=propertyname,
literal=start)
elif constraint == 'within':
propertyname = 'apiso:TempExtent_begin'
begin = fes.PropertyIsGreaterThanOrEqualTo(propertyname=propertyname,
literal=start)
propertyname = 'apiso:TempExtent_end'
end = fes.PropertyIsLessThanOrEqualTo(propertyname=propertyname,
literal=stop)
else:
raise NameError('Unrecognized constraint {}'.format(constraint))
return begin, end
def service_urls(records, services):
"""
Extract service_urls of a specific type (DAP, SOS) from csw records.
Example: services=['urn:x-esri:specification:ServiceType:odp:url']
For more strings check:
https://raw.githubusercontent.com/OSGeo/Cat-Interop/master/LinkPropertyLookupTable.csv
"""
urls = []
for service in services:
for key, rec in records.items():
url = next((d['url'] for d in rec.references if
d['scheme'] == service), None)
if url is not None:
urls.append(url)
urls = sorted(set(urls))
return urls
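# Hedged example (added for illustration): typical use with an owslib CSW
# connection, where `csw.records` maps identifiers to record objects:
#
#     dap_urls = service_urls(
#         csw.records,
#         services=['urn:x-esri:specification:ServiceType:odp:url'])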
def collector2table(collector):
"""
    collector2table returns a station table as a DataFrame.
columns are station, sensor, lon, lat, and the index is the station
number.
This is a substitute for `sos_request`.
"""
# This accepts only 1-day request, but since we only want the
# stations available we try again with end=start.
c = copy.copy(collector)
try:
response = c.raw(responseFormat="text/csv")
except ExceptionReport:
response = c.filter(end=c.start_time).raw(responseFormat="text/csv")
df = read_csv(BytesIO(response.encode('utf-8')),
parse_dates=True)
columns = {'sensor_id': 'sensor',
'station_id': 'station',
'latitude (degree)': 'lat',
'longitude (degree)': 'lon'}
df.rename(columns=columns, inplace=True)
df['sensor'] = [s.split(':')[-1] for s in df['sensor']]
df['station'] = [s.split(':')[-1] for s in df['station']]
df = df[['station', 'sensor', 'lon', 'lat']]
g = df.groupby('station')
df = dict()
for station in g.groups.keys():
df.update({station: g.get_group(station).iloc[0]})
return DataFrame.from_dict(df).T
def sos_request(url='opendap.co-ops.nos.noaa.gov/ioos-dif-sos/SOS', **kw):
"""
Examples
--------
>>> try:
... from urlparse import urlparse
... except ImportError:
... from urllib.parse import urlparse
>>> from datetime import date, datetime, timedelta
>>> today = date.today().strftime("%Y-%m-%d")
>>> start = datetime.strptime(today, "%Y-%m-%d") - timedelta(7)
>>> bbox = [-87.40, 24.25, -74.70, 36.70]
>>> sos_name = 'water_surface_height_above_reference_datum'
>>> offering='urn:ioos:network:NOAA.NOS.CO-OPS:WaterLevelActive'
>>> params = dict(observedProperty=sos_name,
... eventTime=start.strftime('%Y-%m-%dT%H:%M:%SZ'),
... featureOfInterest='BBOX:{0},{1},{2},{3}'.format(*bbox),
... offering=offering)
>>> uri = 'http://opendap.co-ops.nos.noaa.gov/ioos-dif-sos/SOS'
>>> url = sos_request(uri, **params)
>>> bool(urlparse(url).scheme)
True
"""
url = parse_url(url)
offering = 'urn:ioos:network:NOAA.NOS.CO-OPS:CurrentsActive'
params = dict(service='SOS',
request='GetObservation',
version='1.0.0',
offering=offering,
responseFormat='text/csv')
params.update(kw)
r = requests.get(url, params=params)
r.raise_for_status()
content = r.headers['Content-Type']
if 'excel' in content or 'csv' in content:
return r.url
else:
raise TypeError('Bad url {}'.format(r.url))
def get_ndbc_longname(station):
"""
Get long_name for specific station from NOAA NDBC.
Examples
--------
>>> str(get_ndbc_longname(31005))
'Sw Extension'
>>> str(get_ndbc_longname(44013))
'Boston 16 Nm East Of Boston'
"""
url = "http://www.ndbc.noaa.gov/station_page.php"
params = dict(station=station)
r = requests.get(url, params=params)
r.raise_for_status()
soup = BeautifulSoup(r.content, "lxml")
# NOTE: Should be only one!
long_name = soup.findAll("h1")[0]
long_name = long_name.text.split(' - ')[1].strip()
long_name = long_name.split(',')[0].strip()
return long_name.title()
def _get_value(sensor, name='longName'):
value = None
sml = sensor.get(name, None)
if sml:
value = sml.value
return value
def get_coops_metadata(station):
"""
Get longName and sensorName for specific station from COOPS SOS using
DescribeSensor and owslib.swe.sensor.sml.SensorML.
Examples
--------
>>> long_name, station_id = get_coops_metadata(8651370)
>>> long_name
'Duck, NC'
>>> station_id
'urn:ioos:station:NOAA.NOS.CO-OPS:8651370'
"""
url = ('opendap.co-ops.nos.noaa.gov/ioos-dif-sos/SOS?'
'service=SOS&'
'request=DescribeSensor&version=1.0.0&'
'outputFormat=text/xml;'
'subtype="sensorML/1.0.1/profiles/ioos_sos/1.0"&'
'procedure=urn:ioos:station:NOAA.NOS.CO-OPS:%s') % station
url = parse_url(url)
xml = etree.parse(urlopen(url))
root = SensorML(xml)
if not root.members or len(root.members) > 1:
msg = "Expected 1 member, got {}".format
raise ValueError(msg(len(root.members)))
system = root.members[0]
# NOTE: Some metadata of interest.
# system.description
# short_name = _get_value(system.identifiers, name='shortName')
# [c.values() for c in system.components]
long_name = _get_value(system.identifiers, name='longName')
# FIXME: The new CO-OPS standards sucks!
long_name = long_name.split('station, ')[-1].strip()
station_id = _get_value(system.identifiers, name='stationID')
return long_name, station_id
def pyoos2df(collector, station_id, df_name=None):
"""
Request CSV response from SOS and convert to Pandas dataframe.
"""
collector.features = [station_id]
try:
response = collector.raw(responseFormat="text/csv")
kw = dict(parse_dates=True, index_col='date_time')
df = read_csv(BytesIO(response.encode('utf-8')), **kw)
except requests.exceptions.ReadTimeout:
df = ndbc2df(collector, station_id)
# FIXME: Workaround to get only 1 sensor.
df = df.reset_index()
kw = dict(subset='date_time', keep='last')
df = df.drop_duplicates(**kw).set_index('date_time')
if df_name:
df.name = df_name
return df
def ndbc2df(collector, ndbc_id):
"""
Ugly hack because `collector.raw(responseFormat="text/csv")`
    usually times out.
"""
# FIXME: Only sea_water_temperature for now.
if len(collector.variables) > 1:
msg = "Expected only 1 variables to download, got {}".format
raise ValueError(msg(collector.variables))
if collector.variables[0] == 'sea_water_temperature':
columns = 'sea_water_temperature (C)'
ncvar = 'sea_surface_temperature'
data_type = 'stdmet'
# adcp, adcp2, cwind, dart, mmbcur, ocean, oceansites, pwind,
# swden, tao-ctd, wlevel, z-hycom
else:
msg = "Do not know how to download {}".format
raise ValueError(msg(collector.variables))
uri = 'http://dods.ndbc.noaa.gov/thredds/dodsC/data/{}'.format(data_type)
url = ('%s/%s/' % (uri, ndbc_id))
urls = url_lister(url)
filetype = "*.nc"
file_list = [filename for filename in fnmatch.filter(urls, filetype)]
files = [fname.split('/')[-1] for fname in file_list]
urls = ['%s/%s/%s' % (uri, ndbc_id, fname) for fname in files]
if not urls:
raise Exception("Cannot find data at {!r}".format(url))
nc = MFDataset(urls)
kw = dict(calendar='gregorian', select='nearest')
time_dim = nc.variables['time']
time = num2date(time_dim[:], units=time_dim.units,
calendar=kw['calendar'])
idx_start = date2index(collector.start_time.replace(tzinfo=None),
time_dim, **kw)
idx_stop = date2index(collector.end_time.replace(tzinfo=None),
time_dim, **kw)
if idx_start == idx_stop:
raise Exception("No data within time range"
" {!r} and {!r}".format(collector.start_time,
collector.end_time))
data = nc.variables[ncvar][idx_start:idx_stop, ...].squeeze()
time_dim = nc.variables['time']
time = time[idx_start:idx_stop].squeeze()
df = DataFrame(data=data, index=time, columns=[columns])
df.index.name = 'date_time'
return df
def nc2df(fname):
"""
Load a netCDF timeSeries file as a dataframe.
"""
cube = iris.load_cube(fname)
for coord in cube.coords(dimensions=[0]):
name = coord.name()
if name != 'time':
cube.remove_coord(name)
for coord in cube.coords(dimensions=[1]):
name = coord.name()
if name != 'station name':
cube.remove_coord(name)
df = as_data_frame(cube)
if cube.ndim == 1: # Horrible work around iris.
station = cube.coord('station name').points[0]
df.columns = [station]
return df
def fetch_range(start=datetime(2014, 7, 1, 12), days=6, tzinfo=pytz.utc):
"""
For hurricane Arthur week use `start=datetime(2014, 7, 0, 12)`.
"""
start = start.replace(tzinfo=tzinfo)
stop = start + timedelta(days=days)
return start, stop
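# Hedged example (added for illustration):
#
#     >>> start, stop = fetch_range(datetime(2014, 7, 1, 12), days=6)
#     >>> (stop - start).days
#     6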
def _reload_log():
"""IPython workaround."""
import imp
import logging as log
imp.reload(log)
return log
def start_log(start, stop, bbox, log_name):
log = _reload_log()
import os
import pyoos
import owslib
if not os.path.exists(log_name):
os.makedirs(log_name)
msg = 'Saving data inside directory {}'.format(log_name)
else:
msg = 'Overwriting the data inside directory {}'.format(log_name)
fmt = '{:*^64}'.format
log.captureWarnings(True)
LOG_FILENAME = 'log.txt'
LOG_FILENAME = os.path.join(log_name, LOG_FILENAME)
log.basicConfig(filename=LOG_FILENAME,
filemode='w',
format='%(asctime)s %(levelname)s: %(message)s',
datefmt='%I:%M:%S',
level=log.INFO,
stream=None)
log.info(fmt(msg))
log.info(fmt(' Run information '))
log.info('Run date: {:%Y-%m-%d %H:%M:%S}'.format(datetime.utcnow()))
log.info('Download start: {:%Y-%m-%d %H:%M:%S}'.format(start))
log.info('Download stop: {:%Y-%m-%d %H:%M:%S}'.format(stop))
log.info('Bounding box: {0:3.2f}, {1:3.2f},'
'{2:3.2f}, {3:3.2f}'.format(*bbox))
log.info(fmt(' Software version '))
log.info('Iris version: {}'.format(iris.__version__))
log.info('owslib version: {}'.format(owslib.__version__))
log.info('pyoos version: {}'.format(pyoos.__version__))
return log
if __name__ == '__main__':
import doctest
doctest.testmod()
| {
"repo_name": "ocefpaf/utilities",
"path": "utilities/secoora.py",
"copies": "2",
"size": "31850",
"license": "mit",
"hash": 5569458299481301000,
"line_mean": 34.5072463768,
"line_max": 197,
"alpha_frac": 0.6075981162,
"autogenerated": false,
"ratio": 3.209068010075567,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9816459676793756,
"avg_score": 0.00004128989636236013,
"num_lines": 897
} |
from __future__ import absolute_import, division, print_function
import os
import datetime
from functools import wraps
from cytoolz import nth
from itertools import islice
from collections import Iterator
from multiprocessing.pool import ThreadPool
# these are used throughout blaze, don't remove them
from odo.utils import tmpfile, filetext, filetexts, raises, keywords, ignoring
import psutil
import numpy as np
# Imports that replace older utils.
from .compatibility import map, zip
from .dispatch import dispatch
thread_pool = ThreadPool(psutil.NUM_CPUS)
def nth_list(n, seq):
"""
>>> tuple(nth_list([0, 1, 4], 'Hello'))
('H', 'e', 'o')
>>> tuple(nth_list([4, 1, 0], 'Hello'))
('o', 'e', 'H')
>>> tuple(nth_list([0, 0, 0], 'Hello'))
('H', 'H', 'H')
"""
seq = iter(seq)
result = []
old = 0
item = next(seq)
for index in sorted(n):
for i in range(index - old):
item = next(seq)
result.append(item)
old = index
order = [x[1] for x in sorted(zip(n, range(len(n))))]
return (result[i] for i in order)
def get(ind, coll, lazy=False):
"""
>>> get(0, 'Hello')
'H'
>>> get([1, 0], 'Hello')
('e', 'H')
>>> get(slice(1, 4), 'Hello')
('e', 'l', 'l')
>>> get(slice(1, 4), 'Hello', lazy=True)
<itertools.islice object at ...>
"""
if isinstance(ind, list):
result = nth_list(ind, coll)
elif isinstance(ind, slice):
result = islice(coll, ind.start, ind.stop, ind.step)
else:
if isinstance(coll, Iterator):
result = nth(ind, coll)
else:
result = coll[ind]
if not lazy and isinstance(result, Iterator):
result = tuple(result)
return result
def ndget(ind, data):
"""
    Get from an N-dimensional indexable.
    Can index with elements, lists, or slices. Mimics numpy fancy indexing on
    generic indexables.
>>> data = [[[1, 2], [3, 4]], [[5, 6], [7, 8]]]
>>> ndget(0, data)
[[1, 2], [3, 4]]
>>> ndget((0, 1), data)
[3, 4]
>>> ndget((0, 0, 0), data)
1
>>> ndget((slice(0, 2), [0, 1], 0), data)
((1, 3), (5, 7))
"""
if isinstance(ind, tuple) and len(ind) == 1:
ind = ind[0]
if not isinstance(ind, tuple):
return get(ind, data)
result = get(ind[0], data)
if isinstance(ind[0], (list, slice)):
return type(result)(ndget(ind[1:], row) for row in result)
else:
return ndget(ind[1:], result)
def normalize_to_date(dt):
if isinstance(dt, datetime.datetime) and not dt.time():
return dt.date()
else:
return dt
def assert_allclose(lhs, rhs):
for tb in map(zip, lhs, rhs):
for left, right in tb:
if isinstance(left, (np.floating, float)):
# account for nans
assert np.all(np.isclose(left, right, equal_nan=True))
continue
if isinstance(left, datetime.datetime):
left = normalize_to_date(left)
if isinstance(right, datetime.datetime):
right = normalize_to_date(right)
assert left == right
def example(filename, datapath=os.path.join('examples', 'data')):
import blaze
return os.path.join(os.path.dirname(blaze.__file__), datapath, filename)
def available_memory():
return psutil.virtual_memory().available
def listpack(x):
"""
>>> listpack(1)
[1]
>>> listpack((1, 2))
[1, 2]
>>> listpack([1, 2])
[1, 2]
"""
if isinstance(x, tuple):
return list(x)
elif isinstance(x, list):
return x
else:
return [x]
@dispatch(datetime.datetime)
def json_dumps(dt):
s = dt.isoformat()
if not dt.tzname():
s = s + 'Z'
return s
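# Hedged example (added for illustration): naive datetimes serialize to
# ISO-8601 with a trailing 'Z':
#
#     >>> json_dumps(datetime.datetime(2015, 1, 1, 12, 30))
#     '2015-01-01T12:30:00Z'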
| {
"repo_name": "mrocklin/blaze",
"path": "blaze/utils.py",
"copies": "1",
"size": "3794",
"license": "bsd-3-clause",
"hash": -1203885286757224700,
"line_mean": 22.7125,
"line_max": 79,
"alpha_frac": 0.5600948867,
"autogenerated": false,
"ratio": 3.3634751773049647,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.44235700640049647,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import os
import datetime
import re
from weakref import WeakKeyDictionary
try:
from cytoolz import nth, memoize, unique, concat, first, drop
except ImportError:
from toolz import nth, memoize, unique, concat, first, drop
from toolz.curried.operator import setitem
from itertools import islice
from collections import Iterator
# these are used throughout blaze, don't remove them
from odo.utils import tmpfile, filetext, filetexts, raises, keywords, ignoring
import pandas as pd
import psutil
import numpy as np
# Imports that replace older utils.
from .compatibility import map, zip
from .dispatch import dispatch
def nth_list(n, seq):
"""
>>> tuple(nth_list([0, 1, 4], 'Hello'))
('H', 'e', 'o')
>>> tuple(nth_list([4, 1, 0], 'Hello'))
('o', 'e', 'H')
>>> tuple(nth_list([0, 0, 0], 'Hello'))
('H', 'H', 'H')
"""
seq = iter(seq)
result = []
old = 0
item = next(seq)
for index in sorted(n):
for i in range(index - old):
item = next(seq)
result.append(item)
old = index
order = [x[1] for x in sorted(zip(n, range(len(n))))]
return (result[i] for i in order)
def get(ind, coll, lazy=False):
"""
>>> get(0, 'Hello')
'H'
>>> get([1, 0], 'Hello')
('e', 'H')
>>> get(slice(1, 4), 'Hello')
('e', 'l', 'l')
>>> get(slice(1, 4), 'Hello', lazy=True)
<itertools.islice object at ...>
"""
if isinstance(ind, list):
result = nth_list(ind, coll)
elif isinstance(ind, slice):
result = islice(coll, ind.start, ind.stop, ind.step)
else:
if isinstance(coll, Iterator):
result = nth(ind, coll)
else:
result = coll[ind]
if not lazy and isinstance(result, Iterator):
result = tuple(result)
return result
def ndget(ind, data):
"""
    Get from an N-dimensional indexable.
    Can index with elements, lists, or slices. Mimics numpy fancy indexing on
    generic indexables.
>>> data = [[[1, 2], [3, 4]], [[5, 6], [7, 8]]]
>>> ndget(0, data)
[[1, 2], [3, 4]]
>>> ndget((0, 1), data)
[3, 4]
>>> ndget((0, 0, 0), data)
1
>>> ndget((slice(0, 2), [0, 1], 0), data)
((1, 3), (5, 7))
"""
if isinstance(ind, tuple) and len(ind) == 1:
ind = ind[0]
if not isinstance(ind, tuple):
return get(ind, data)
result = get(ind[0], data)
if isinstance(ind[0], (list, slice)):
return type(result)(ndget(ind[1:], row) for row in result)
else:
return ndget(ind[1:], result)
def normalize_to_date(dt):
if isinstance(dt, datetime.datetime) and not dt.time():
return dt.date()
else:
return dt
def assert_allclose(lhs, rhs):
for tb in map(zip, lhs, rhs):
for left, right in tb:
if isinstance(left, (np.floating, float)):
# account for nans
assert np.all(np.isclose(left, right, equal_nan=True))
continue
if isinstance(left, datetime.datetime):
left = normalize_to_date(left)
if isinstance(right, datetime.datetime):
right = normalize_to_date(right)
assert left == right
def example(filename, datapath=os.path.join('examples', 'data')):
import blaze
return os.path.join(os.path.dirname(blaze.__file__), datapath, filename)
def available_memory():
return psutil.virtual_memory().available
def listpack(x):
"""
>>> listpack(1)
[1]
>>> listpack((1, 2))
[1, 2]
>>> listpack([1, 2])
[1, 2]
"""
if isinstance(x, tuple):
return list(x)
elif isinstance(x, list):
return x
else:
return [x]
@dispatch(datetime.datetime)
def json_dumps(dt):
if dt is pd.NaT:
# NaT has an isoformat but it is totally invalid.
# This keeps the parsing on the client side simple.
s = 'NaT'
else:
s = dt.isoformat()
if not dt.tzname():
s += 'Z'
return {'__!datetime': s}
@dispatch(frozenset)
def json_dumps(ds):
return {'__!frozenset': list(ds)}
@dispatch(datetime.timedelta)
def json_dumps(ds):
return {'__!timedelta': ds.total_seconds()}
def object_hook(obj):
"""Convert a json object dict back into a python object.
This looks for our objects that have encoded richer representations with
a ``__!{type}`` key.
Parameters
----------
obj : dict
The raw json parsed dictionary.
Returns
-------
parsed : any
The richer form of the object.
Notes
-----
The types that this reads can be extended with the ``register`` method.
For example:
>>> class MyList(list):
... pass
>>> @object_hook.register('MyList')
... def _parse_my_list(obj):
... return MyList(obj)
Register can also be called as a function like:
>>> object_hook.register('frozenset', frozenset)
"""
if len(obj) != 1:
return obj
key, = obj.keys()
if not key.startswith('__!'):
return obj
return object_hook._converters[key[len('__!'):]](obj[key])
object_hook._converters = {}
object_hook.register = setitem(object_hook._converters)
object_hook.register('datetime', pd.Timestamp)
object_hook.register('frozenset', frozenset)
@object_hook.register('timedelta')
def _read_timedelta(ds):
return datetime.timedelta(seconds=ds)
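# Hedged usage sketch (not part of the original module): how the ``json_dumps``
# converters above pair with ``object_hook`` for a JSON round trip.  Wiring the
# converters in through ``default=`` is illustrative here, not necessarily how
# blaze's server does it.
# >>> import json
# >>> payload = json.dumps({'when': pd.Timestamp('2015-01-01')}, default=json_dumps)
# >>> json.loads(payload, object_hook=object_hook)['when'] == pd.Timestamp('2015-01-01', tz='UTC')
# True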
def normalize(s):
s = ' '.join(s.strip().split()).lower()
s = re.sub(r'(alias)_?\d*', r'\1', s)
return re.sub(r'__([A-Za-z_][A-Za-z_0-9]*)', r'\1', s)
def weakmemoize(f):
"""Memoize ``f`` with a ``WeakKeyDictionary`` to allow the arguments
to be garbage collected.
Parameters
----------
f : callable
The function to memoize.
Returns
-------
g : callable
``f`` with weak memoization of its arguments.
"""
return memoize(f, cache=WeakKeyDictionary())
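# Hedged sketch (not part of the original module): ``weakmemoize`` caches per
# argument object; the ``Box`` class below is invented for illustration, since
# WeakKeyDictionary keys must be weakly referenceable (plain ints or strings
# would not work).
# >>> calls = []
# >>> class Box(object):
# ...     pass
# >>> @weakmemoize
# ... def expensive(box):
# ...     calls.append(box)
# ...     return 42
# >>> b = Box()
# >>> expensive(b) == expensive(b)
# True
# >>> len(calls)  # computed once; the cache entry can be collected with ``b``
# 1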
def ordered_intersect(*sets):
"""Set intersection of two sequences that preserves order.
Parameters
----------
sets : tuple of Sequence
Returns
-------
generator
Examples
--------
>>> list(ordered_intersect('abcd', 'cdef'))
['c', 'd']
>>> list(ordered_intersect('bcda', 'bdfga'))
['b', 'd', 'a']
>>> list(ordered_intersect('zega', 'age')) # 1st sequence determines order
['e', 'g', 'a']
>>> list(ordered_intersect('gah', 'bag', 'carge'))
['g', 'a']
"""
common = frozenset.intersection(*map(frozenset, sets))
return (x for x in unique(concat(sets)) if x in common)
| {
"repo_name": "cpcloud/blaze",
"path": "blaze/utils.py",
"copies": "2",
"size": "6646",
"license": "bsd-3-clause",
"hash": -9206644118380723000,
"line_mean": 22.6512455516,
"line_max": 79,
"alpha_frac": 0.5723743605,
"autogenerated": false,
"ratio": 3.494216614090431,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5066590974590431,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import os
import datetime
import re
from bs4 import BeautifulSoup
from sunpy.extern import six
from sunpy.extern.six.moves import range, zip
from sunpy.extern.six.moves.urllib.request import urlopen
__all__ = ['Scraper']
# regular expressions to convert datetime format
TIME_CONVERSIONS = {'%Y': '\d{4}', '%y': '\d{2}',
'%b': '[A-Z]..', '%B': '\W', '%m': '\d{2}',
'%d': '\d{2}', '%j': '\d{3}',
'%H': '\d{2}', '%I': '\d{2}',
'%M': '\d{2}',
'%S': '\d{2}'}
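# Hedged sketch (not part of the original module): how a strftime-style URL
# template becomes a regular expression via TIME_CONVERSIONS; this is the same
# substitution _URL_followsPattern performs below.  The URL here is made up.
# >>> pattern = 'http://example.org/%Y/%m/%d/file_%Y%m%d.fits'
# >>> for fmt, regex in TIME_CONVERSIONS.items():
# ...     pattern = pattern.replace(fmt, regex)
# >>> bool(re.match(pattern, 'http://example.org/2015/01/01/file_20150101.fits'))
# True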
class Scraper(object):
"""
A Scraper to scrape web data archives based on dates.
Parameters
----------
pattern : string
A string containing the url with the date encoded as
datetime format codes, and any other parameters given as
string-format kwargs.
Attributes
----------
pattern : string
A converted string with the kwargs.
now : string
The pattern with the current date and time filled in.
Examples
--------
>>> # Downloading data from SolarMonitor.org
>>> from sunpy.util.scraper import Scraper
>>> solmon_pattern = ('http://solarmonitor.org/data/'
'%Y/%m/%d/fits/{instrument}/'
'{instrument}_{wave:05d}_fd_%Y%m%d_%H%M%S.fts.gz')
>>> solmon = Scraper(solmon_pattern, instrument = 'swap', wave = 174)
>>> print(solmon.pattern)
http://solarmonitor.org/data/%Y/%m/%d/fits/swap/swap_00174_fd_%Y%m%d_%H%M%S.fts.gz
>>> print(solmon.now)
http://solarmonitor.org/data/2012/01/25/fits/swap/swap_00174_fd_20120125_173301.fts.gz
Notes
-----
The now attribute does not point to an existing file; it just shows how the
pattern looks with the current time.
"""
def __init__(self, pattern, **kwargs):
self.pattern = pattern.format(**kwargs)
self.now = datetime.datetime.now().strftime(self.pattern)
def matches(self, filepath, date):
return date.strftime(self.pattern) == filepath
def range(self, timerange):
"""
Gets the directories for a certain range of time
(i.e. using `~sunpy.time.TimeRange`).
Parameters
----------
timerange : `~sunpy.time.timerange.TimeRange`
Time interval where to find the directories for a given
pattern.
Returns
-------
directories : list of strings
List of all the possible directories valid for the time
range given. Notice that these directories may not exist
in the archive.
"""
#find directory structure - without file names
directorypattern = os.path.dirname(self.pattern) + '/'
#TODO what if there are no slashes?
rangedelta = timerange.dt
timestep = self._smallerPattern(directorypattern)
if timestep is None:
return [directorypattern]
else:
# Number of elements in the time range (including end)
n_steps = rangedelta.total_seconds()/timestep.total_seconds()
TotalTimeElements = int(round(n_steps)) + 1
directories = [(timerange.start + n * timestep).strftime(directorypattern)
for n in range(TotalTimeElements)] #todo if date <= end date
return directories
def _URL_followsPattern(self, url):
"""Check whether the url provided follows the pattern"""
pattern = self.pattern
for k,v in six.iteritems(TIME_CONVERSIONS):
pattern = pattern.replace(k, v)
matches = re.match(pattern, url)
if matches:
return matches.end() == matches.endpos == len(self.now)
return False
def _extractDateURL(self, url):
"""Extracts the date from a particular url following the pattern"""
# url_to_list substitutes '.' and '_' for '/' to then create
# a list of all the blocks in times - assuming they are all
# separated with either '.', '_' or '/'
url_to_list = lambda txt: re.sub(r'\.|_', '/', txt).split('/')
pattern_list = url_to_list(self.pattern)
url_list = url_to_list(url)
time_order = ['%Y', '%y', '%b', '%B', '%m', '%d', '%j',
'%H', '%I', '%M', '%S']
final_date = []
final_pattern = []
# Find in directory and filename
for pattern_elem, url_elem in zip(pattern_list, url_list):
time_formats = [x for x in time_order if x in pattern_elem]
if len(time_formats) > 0:
final_date.append(url_elem)
final_pattern.append(pattern_elem)
for time_bit in time_formats:
time_order.remove(time_bit)
# Find and remove repeated elements eg: %Y in ['%Y', '%Y%m%d']
# Make all as single strings
date_together = ''.join(final_date)
pattern_together = ''.join(final_pattern)
re_together = pattern_together
for k, v in six.iteritems(TIME_CONVERSIONS):
re_together = re_together.replace(k, v)
# Create new empty lists
final_date = list()
final_pattern = list()
for p,r in zip(pattern_together.split('%')[1:], re_together.split('\\')[1:]):
regexp = '\\{}'.format(r)
pattern = '%{}'.format(p)
date_part = re.match(regexp, date_together)
date_together = date_together[:date_part.start()] + \
date_together[date_part.end():]
if pattern not in final_pattern:
final_pattern.append('%{}'.format(p))
final_date.append(date_part.group())
return datetime.datetime.strptime(' '.join(final_date),
' '.join(final_pattern))
def filelist(self, timerange):
"""
Returns the list of existing files in the archive for the
given time range.
Parameters
----------
timerange : `~sunpy.time.TimeRange`
Time interval where to find the directories for a given
pattern.
Returns
-------
filesurls : list of strings
List of all the files found between the time range given.
Examples
--------
>>> from sunpy.time import TimeRange
>>> timerange = TimeRange('2015-01-01','2015-01-01T16:00:00')
>>> print(solmon.filelist(timerange))
['http://solarmonitor.org/data/2015/01/01/fits/swap/swap_00174_fd_20150101_025423.fts.gz']
"""
directories = self.range(timerange)
filesurls = []
for directory in directories:
try:
opn = urlopen(directory)
try:
soup = BeautifulSoup(opn)
for link in soup.find_all("a"):
href = link.get("href")
if href.endswith(self.pattern.split('.')[-1]):
fullpath = directory + href
if self._URL_followsPattern(fullpath):
datehref = self._extractDateURL(fullpath)
if (datehref >= timerange.start and
datehref <= timerange.end):
filesurls.append(fullpath)
finally:
opn.close()
except:
pass
return filesurls
def _smallerPattern(self, directoryPattern):
"""Obtain the smaller time step for the given pattern"""
try:
if "%S" in directoryPattern:
return datetime.timedelta(seconds=1)
elif "%M" in directoryPattern:
return datetime.timedelta(minutes=1)
elif any(hour in directoryPattern for hour in ["%H", "%I"]):
return datetime.timedelta(hours=1)
elif any(day in directoryPattern for day in ["%d", "%j"]):
return datetime.timedelta(days=1)
elif any(month in directoryPattern for month in ["%b","%B","%m"]):
return datetime.timedelta(days=31)
elif any(year in directoryPattern for year in ["%Y", "%y"]):
return datetime.timedelta(days=365)
else:
return None
except:
raise
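# Hedged sketch (not part of the original module): what _extractDateURL recovers
# from a concrete URL, using the SolarMonitor pattern from the class docstring.
# >>> solmon = Scraper('http://solarmonitor.org/data/%Y/%m/%d/fits/{instrument}/'
# ...                  '{instrument}_{wave:05d}_fd_%Y%m%d_%H%M%S.fts.gz',
# ...                  instrument='swap', wave=174)
# >>> solmon._extractDateURL('http://solarmonitor.org/data/2015/01/01/fits/swap/'
# ...                        'swap_00174_fd_20150101_025423.fts.gz')
# datetime.datetime(2015, 1, 1, 2, 54, 23)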
| {
"repo_name": "Alex-Ian-Hamilton/sunpy",
"path": "sunpy/util/scraper.py",
"copies": "1",
"size": "8456",
"license": "bsd-2-clause",
"hash": -8400999742058093000,
"line_mean": 37.4363636364,
"line_max": 98,
"alpha_frac": 0.5413907285,
"autogenerated": false,
"ratio": 4.236472945891784,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0071803408486136805,
"num_lines": 220
} |
from __future__ import absolute_import, division, print_function
import os
import datetime
import re
try:
from cytoolz import nth
except ImportError:
from toolz import nth
from toolz.curried.operator import setitem
from itertools import islice
from collections import Iterator
from multiprocessing.pool import ThreadPool
# these are used throughout blaze, don't remove them
from odo.utils import tmpfile, filetext, filetexts, raises, keywords, ignoring
import pandas as pd
import psutil
import numpy as np
# Imports that replace older utils.
from .compatibility import map, zip
from .dispatch import dispatch
thread_pool = ThreadPool(psutil.cpu_count())
def nth_list(n, seq):
"""
>>> tuple(nth_list([0, 1, 4], 'Hello'))
('H', 'e', 'o')
>>> tuple(nth_list([4, 1, 0], 'Hello'))
('o', 'e', 'H')
>>> tuple(nth_list([0, 0, 0], 'Hello'))
('H', 'H', 'H')
"""
seq = iter(seq)
result = []
old = 0
item = next(seq)
for index in sorted(n):
for i in range(index - old):
item = next(seq)
result.append(item)
old = index
order = [x[1] for x in sorted(zip(n, range(len(n))))]
return (result[i] for i in order)
def get(ind, coll, lazy=False):
"""
>>> get(0, 'Hello')
'H'
>>> get([1, 0], 'Hello')
('e', 'H')
>>> get(slice(1, 4), 'Hello')
('e', 'l', 'l')
>>> get(slice(1, 4), 'Hello', lazy=True)
<itertools.islice object at ...>
"""
if isinstance(ind, list):
result = nth_list(ind, coll)
elif isinstance(ind, slice):
result = islice(coll, ind.start, ind.stop, ind.step)
else:
if isinstance(coll, Iterator):
result = nth(ind, coll)
else:
result = coll[ind]
if not lazy and isinstance(result, Iterator):
result = tuple(result)
return result
def ndget(ind, data):
"""
Get from an N-dimensional gettable
Can index with elements, lists, or slices. Mimics numpy fancy indexing on
generic indexables.
>>> data = [[[1, 2], [3, 4]], [[5, 6], [7, 8]]]
>>> ndget(0, data)
[[1, 2], [3, 4]]
>>> ndget((0, 1), data)
[3, 4]
>>> ndget((0, 0, 0), data)
1
>>> ndget((slice(0, 2), [0, 1], 0), data)
((1, 3), (5, 7))
"""
if isinstance(ind, tuple) and len(ind) == 1:
ind = ind[0]
if not isinstance(ind, tuple):
return get(ind, data)
result = get(ind[0], data)
if isinstance(ind[0], (list, slice)):
return type(result)(ndget(ind[1:], row) for row in result)
else:
return ndget(ind[1:], result)
def normalize_to_date(dt):
if isinstance(dt, datetime.datetime) and not dt.time():
return dt.date()
else:
return dt
def assert_allclose(lhs, rhs):
for tb in map(zip, lhs, rhs):
for left, right in tb:
if isinstance(left, (np.floating, float)):
# account for nans
assert np.all(np.isclose(left, right, equal_nan=True))
continue
if isinstance(left, datetime.datetime):
left = normalize_to_date(left)
if isinstance(right, datetime.datetime):
right = normalize_to_date(right)
assert left == right
def example(filename, datapath=os.path.join('examples', 'data')):
import blaze
return os.path.join(os.path.dirname(blaze.__file__), datapath, filename)
def available_memory():
return psutil.virtual_memory().available
def listpack(x):
"""
>>> listpack(1)
[1]
>>> listpack((1, 2))
[1, 2]
>>> listpack([1, 2])
[1, 2]
"""
if isinstance(x, tuple):
return list(x)
elif isinstance(x, list):
return x
else:
return [x]
@dispatch(datetime.datetime)
def json_dumps(dt):
if dt is pd.NaT:
# NaT has an isoformat but it is totally invalid.
# This keeps the parsing on the client side simple.
s = 'NaT'
else:
s = dt.isoformat()
if not dt.tzname():
s += 'Z'
return {'__!datetime': s}
@dispatch(frozenset)
def json_dumps(ds):
return {'__!frozenset': list(ds)}
@dispatch(datetime.timedelta)
def json_dumps(ds):
return {'__!timedelta': ds.total_seconds()}
def object_hook(obj):
"""Convert a json object dict back into a python object.
This looks for our objects that have encoded richer representations with
a ``__!{type}`` key.
Parameters
----------
obj : dict
The raw json parsed dictionary.
Returns
-------
parsed : any
The richer form of the object.
Notes
-----
The types that this reads can be extended with the ``register`` method.
For example:
>>> class MyList(list):
... pass
>>> @object_hook.register('MyList')
... def _parse_my_list(obj):
... return MyList(obj)
Register can also be called as a function like:
>>> object_hook.register('frozenset', frozenset)
"""
if len(obj) != 1:
return obj
key, = obj.keys()
if not key.startswith('__!'):
return obj
return object_hook._converters[key[len('__!'):]](obj[key])
object_hook._converters = {}
object_hook.register = setitem(object_hook._converters)
object_hook.register('datetime', pd.Timestamp)
object_hook.register('frozenset', frozenset)
@object_hook.register('timedelta')
def _read_timedelta(ds):
return datetime.timedelta(seconds=ds)
def normalize(s):
s = ' '.join(s.strip().split()).lower()
s = re.sub(r'(alias)_?\d*', r'\1', s)
return re.sub(r'__([A-Za-z_][A-Za-z_0-9]*)', r'\1', s)
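# Hedged sketch (not part of the original module): ``normalize`` collapses
# whitespace, lowercases, and strips alias numbering and leading-underscore
# mangling; the SQL-ish string below is invented for illustration.
# >>> normalize('SELECT  alias_1.x  AS  __total')
# 'select alias.x as total'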
| {
"repo_name": "ChinaQuants/blaze",
"path": "blaze/utils.py",
"copies": "1",
"size": "5636",
"license": "bsd-3-clause",
"hash": 1050704111056536400,
"line_mean": 22.4833333333,
"line_max": 79,
"alpha_frac": 0.5746983676,
"autogenerated": false,
"ratio": 3.4386821232458815,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45133804908458813,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import os
import datetime
try:
from cytoolz import nth
except ImportError:
from toolz import nth
from itertools import islice
from collections import Iterator
from multiprocessing.pool import ThreadPool
# these are used throughout blaze, don't remove them
from odo.utils import tmpfile, filetext, filetexts, raises, keywords, ignoring
import psutil
import numpy as np
# Imports that replace older utils.
from .compatibility import map, zip
from .dispatch import dispatch
thread_pool = ThreadPool(psutil.cpu_count())
def nth_list(n, seq):
"""
>>> tuple(nth_list([0, 1, 4], 'Hello'))
('H', 'e', 'o')
>>> tuple(nth_list([4, 1, 0], 'Hello'))
('o', 'e', 'H')
>>> tuple(nth_list([0, 0, 0], 'Hello'))
('H', 'H', 'H')
"""
seq = iter(seq)
result = []
old = 0
item = next(seq)
for index in sorted(n):
for i in range(index - old):
item = next(seq)
result.append(item)
old = index
order = [x[1] for x in sorted(zip(n, range(len(n))))]
return (result[i] for i in order)
def get(ind, coll, lazy=False):
"""
>>> get(0, 'Hello')
'H'
>>> get([1, 0], 'Hello')
('e', 'H')
>>> get(slice(1, 4), 'Hello')
('e', 'l', 'l')
>>> get(slice(1, 4), 'Hello', lazy=True)
<itertools.islice object at ...>
"""
if isinstance(ind, list):
result = nth_list(ind, coll)
elif isinstance(ind, slice):
result = islice(coll, ind.start, ind.stop, ind.step)
else:
if isinstance(coll, Iterator):
result = nth(ind, coll)
else:
result = coll[ind]
if not lazy and isinstance(result, Iterator):
result = tuple(result)
return result
def ndget(ind, data):
"""
Get from an N-dimensional gettable
Can index with elements, lists, or slices. Mimics numpy fancy indexing on
generic indexables.
>>> data = [[[1, 2], [3, 4]], [[5, 6], [7, 8]]]
>>> ndget(0, data)
[[1, 2], [3, 4]]
>>> ndget((0, 1), data)
[3, 4]
>>> ndget((0, 0, 0), data)
1
>>> ndget((slice(0, 2), [0, 1], 0), data)
((1, 3), (5, 7))
"""
if isinstance(ind, tuple) and len(ind) == 1:
ind = ind[0]
if not isinstance(ind, tuple):
return get(ind, data)
result = get(ind[0], data)
if isinstance(ind[0], (list, slice)):
return type(result)(ndget(ind[1:], row) for row in result)
else:
return ndget(ind[1:], result)
def normalize_to_date(dt):
if isinstance(dt, datetime.datetime) and not dt.time():
return dt.date()
else:
return dt
def assert_allclose(lhs, rhs):
for tb in map(zip, lhs, rhs):
for left, right in tb:
if isinstance(left, (np.floating, float)):
# account for nans
assert np.all(np.isclose(left, right, equal_nan=True))
continue
if isinstance(left, datetime.datetime):
left = normalize_to_date(left)
if isinstance(right, datetime.datetime):
right = normalize_to_date(right)
assert left == right
def example(filename, datapath=os.path.join('examples', 'data')):
import blaze
return os.path.join(os.path.dirname(blaze.__file__), datapath, filename)
def available_memory():
return psutil.virtual_memory().available
def listpack(x):
"""
>>> listpack(1)
[1]
>>> listpack((1, 2))
[1, 2]
>>> listpack([1, 2])
[1, 2]
"""
if isinstance(x, tuple):
return list(x)
elif isinstance(x, list):
return x
else:
return [x]
@dispatch(datetime.datetime)
def json_dumps(dt):
s = dt.isoformat()
if not dt.tzname():
s += 'Z'
return s
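# Hedged sketch (not part of the original module): the serializer above appends
# a 'Z' whenever the datetime carries no timezone name.
# >>> json_dumps(datetime.datetime(2015, 1, 1, 12, 30))
# '2015-01-01T12:30:00Z'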
| {
"repo_name": "maxalbert/blaze",
"path": "blaze/utils.py",
"copies": "10",
"size": "3822",
"license": "bsd-3-clause",
"hash": 2652509503612707000,
"line_mean": 22.4478527607,
"line_max": 79,
"alpha_frac": 0.5596546311,
"autogenerated": false,
"ratio": 3.3703703703703702,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.893002500147037,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import os
import fnmatch
import numpy as np
from scipy.misc import imread, imresize, imsave
PROJECT_NAME = "Cervical Cancer Screening"
PROJECT_FOLDER_PATH = os.path.join(os.path.expanduser("~"),
"Documents/datasets", PROJECT_NAME)
ORIGINAL_DATASET_FOLDER_PATH = os.path.join(PROJECT_FOLDER_PATH, "original")
PROCESSED_DATASET_FOLDER_PATH = os.path.join(PROJECT_FOLDER_PATH, "processed")
PROCESSED_IMAGE_HEIGHT, PROCESSED_IMAGE_WIDTH = 300, 224
def get_certain_files_recursively_within_folder(root_folder_path,
file_name_rule):
for folder_path, _, file_name_list in os.walk(root_folder_path):
for file_name in fnmatch.filter(file_name_list, file_name_rule):
yield os.path.join(folder_path, file_name)
def perform_preprocessing(original_image_file_path, processed_image_file_path):
try:
original_image = imread(original_image_file_path)
original_image_height, original_image_width = original_image.shape[:2]
if (PROCESSED_IMAGE_HEIGHT > PROCESSED_IMAGE_WIDTH) != (
original_image_height > original_image_width):
original_image = np.swapaxes(original_image, 0, 1)
processed_image_parent_folder_path = os.path.dirname(
processed_image_file_path)
if not os.path.isdir(processed_image_parent_folder_path):
os.makedirs(processed_image_parent_folder_path)
imsave(
processed_image_file_path,
imresize(original_image,
(PROCESSED_IMAGE_HEIGHT, PROCESSED_IMAGE_WIDTH)))
assert os.path.isfile(processed_image_file_path)
except Exception as exception:
print("[WARNING]: exception for %s: %s" %
(original_image_file_path[len(ORIGINAL_DATASET_FOLDER_PATH):],
exception))
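# Hedged sketch (not part of the original script): the orientation check above
# swaps the first two axes whenever the source image and the target shape
# disagree on portrait vs. landscape; the array below is a stand-in image.
# >>> image = np.zeros((224, 300, 3))   # landscape source
# >>> np.swapaxes(image, 0, 1).shape    # matches the 300x224 portrait target
# (300, 224, 3)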
def run():
print("[INFO]: resizing and rotating images ...")
print("[INFO]: original folder: %s" % ORIGINAL_DATASET_FOLDER_PATH)
print("[INFO]: processed folder: %s" % PROCESSED_DATASET_FOLDER_PATH)
original_image_file_path_list = list(
get_certain_files_recursively_within_folder(
ORIGINAL_DATASET_FOLDER_PATH, "*.jpg"))
for original_image_file_index, original_image_file_path in enumerate(
original_image_file_path_list, start=1):
print("[INFO]: working on image %s/%s ..." %
(original_image_file_index, len(original_image_file_path_list)))
processed_image_file_path = PROCESSED_DATASET_FOLDER_PATH + original_image_file_path[
len(ORIGINAL_DATASET_FOLDER_PATH):]
if not os.path.isfile(processed_image_file_path):
perform_preprocessing(original_image_file_path,
processed_image_file_path)
print("[INFO]: edited all images, exit!")
if __name__ == "__main__":
run()
| {
"repo_name": "nixingyang/Kaggle-Competitions",
"path": "Cervical Cancer Screening/data_preprocessing.py",
"copies": "1",
"size": "2963",
"license": "mit",
"hash": 9166761521032873000,
"line_mean": 40.1527777778,
"line_max": 93,
"alpha_frac": 0.6422544718,
"autogenerated": false,
"ratio": 3.6580246913580248,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9799540392472446,
"avg_score": 0.00014775413711583924,
"num_lines": 72
} |
from __future__ import absolute_import, division, print_function
import os
import glob
try:
import pydicom
except ImportError:
import dicom as pydicom
import numpy as np
from glue.logger import logger
from glue.core.data import Data
from glue.config import data_factory
__all__ = ['is_dicom', 'dicom_reader']
def is_dicom_file(filename):
"""
This function is used to check whether a file is in the DICOM format. We
do this by checking that bytes 129 to 132 are DICM. See
http://stackoverflow.com/questions/4792727/validating-dicom-file
for more details
"""
try:
with open(filename, 'rb') as f:
f.read(128)
if f.read(4) == b'DICM':
return True
else:
return False
except Exception:
return False
def dicom_label(filename):
"""
This function just returns the name of the file without the .dcm extension
if present. We don't strip off any other extensions in case they are part
of the name and not actually an extension.
"""
label = os.path.basename(os.path.normpath(filename))
if label.endswith('.dcm'):
label = label[:-4]
return label
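# Hedged sketch (not part of the original module): ``dicom_label`` only strips a
# trailing '.dcm'; the paths below are made up.
# >>> dicom_label('/data/scans/brain_001.dcm')
# 'brain_001'
# >>> dicom_label('/data/scans/brain_001.ima')
# 'brain_001.ima'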
def is_dicom(source):
"""
Determine if the source is either a DICOM file or a directory that
contains at least one DICOM file.
"""
if os.path.isdir(source):
for filename in glob.glob(os.path.join(source, '*')):
if is_dicom_file(filename):
return True
return False
else:
return is_dicom_file(source)
@data_factory(
label='DICOM file or directory',
identifier=is_dicom,
priority=100,
)
def dicom_reader(source):
"""
Read a DICOM file or a directory with DICOM files
"""
if os.path.isdir(source):
# We are dealing with a directory which should contain DICOM files. At
# this point, we need to check whether the directory contains zero,
# one, or more DICOM datasets.
arrays = {}
for filename in glob.glob(os.path.join(source, '*')):
if is_dicom_file(filename):
logger.info("Reading DICOM data from {0}".format(filename))
ds = pydicom.read_file(filename)
arrays[dicom_label(filename)] = ds.pixel_array
else:
logger.info("Not a DICOM file: {0}".format(filename))
# If there are no DICOM files, we raise an error, and if there is one
# then we are done!
if len(arrays) == 0:
raise Exception("No DICOM files found in directory: {0}".format(source))
elif len(arrays) == 1:
label = list(arrays.keys())[0]
return [Data(array=arrays[label], label=label)]
# We now check whether all the shapes of the DICOM files are the same,
# and if so, we merge them into a single file.
labels = sorted(arrays)
ref_shape = arrays[labels[0]].shape
for label in labels[1:]:
if arrays[label].shape != ref_shape:
break
else:
# Since we are here, the shapes of all the DICOM files match, so
# we can construct a higher-dimensional array.
# Make sure arrays are sorted while constructing array
array = np.array([arrays[label] for label in labels])
# We flip the array here because in most cases we expect that the
# scan will start at the top of e.g. the body and move downwards.
array = array[::-1]
return [Data(array=array, label=dicom_label(source))]
# If we are here, the shapes of the DICOM files didn't match, so we
# simply return one Data object per DICOM file.
return [Data(array=arrays[label], label=label) for label in labels]
else:
ds = pydicom.read_file(source)
data = [Data(array=ds.pixel_array, label=dicom_label(source))]
return data
| {
"repo_name": "glue-viz/glue-medical",
"path": "glue_medical/dicom_factory.py",
"copies": "1",
"size": "3969",
"license": "bsd-3-clause",
"hash": -8404836052072223000,
"line_mean": 28.4,
"line_max": 84,
"alpha_frac": 0.6084656085,
"autogenerated": false,
"ratio": 3.9219367588932808,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5030402367393281,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import os
import imp
import sys
from collections import namedtuple
from glue.logger import logger
"""
Objects used to configure Glue at runtime.
"""
__all__ = ['Registry', 'SettingRegistry', 'ExporterRegistry',
'ColormapRegistry', 'DataFactoryRegistry', 'QtClientRegistry',
'LinkFunctionRegistry', 'LinkHelperRegistry', 'ViewerToolRegistry',
'SingleSubsetLayerActionRegistry', 'ProfileFitterRegistry',
'qt_client', 'data_factory', 'link_function', 'link_helper',
'colormaps', 'exporters', 'settings', 'fit_plugin',
'auto_refresh', 'importer', 'DictRegistry', 'preference_panes',
'PreferencePanesRegistry', 'DataExporterRegistry', 'data_exporter']
CFG_DIR = os.path.join(os.path.expanduser('~'), '.glue')
class Registry(object):
"""Container to hold groups of objects or settings.
Registry instances are used by Glue to track objects
used for various tasks like data linking, widget creation, etc.
They have the following properties:
- A `members` property, which lists each item in the registry
- A `default_members` function, which can be overridden to lazily
initialize the members list
- A call interface, allowing the instance to be used as a decorator
for users to add new items to the registry in their config files
"""
def __init__(self):
self._members = []
self._lazy_members = []
self._loaded = False
@property
def members(self):
""" A list of the members in the registry.
The return value is a list. The contents of the list
are specified in each subclass"""
self._load_lazy_members()
if not self._loaded:
self._members = self.default_members() + self._members
self._loaded = True
return self._members
def default_members(self):
"""The member items provided by default. These are put in this
method so that code is only imported when needed"""
return []
def add(self, value):
"""
Add a new item to the registry.
"""
self._members.append(value)
def lazy_add(self, value):
"""
Add a reference to a plugin which will be loaded when needed.
"""
self._lazy_members.append(value)
def _load_lazy_members(self):
from glue.plugins import load_plugin
while self._lazy_members:
plugin = self._lazy_members.pop()
load_plugin(plugin)
def __iter__(self):
return iter(self.members)
def __len__(self):
return len(self.members)
def __contains__(self, value):
return value in self.members
def __call__(self, arg):
"""This is provided so that registry instances can be used
as decorators. The decorators should add the decorated
code object to the registry, and return the original function"""
self.add(arg)
return arg
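# Hedged sketch (not part of the original module): using a Registry instance as
# a decorator, as described in the class docstring; the names are invented.
# >>> greeters = Registry()
# >>> @greeters
# ... def hello():
# ...     return 'hello'
# >>> hello in greeters
# True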
class DictRegistry(Registry):
"""
Base class for registries that are based on dictionaries instead of lists
of objects.
"""
def __init__(self):
self._members = {}
self._lazy_members = []
self._loaded = False
@property
def members(self):
self._load_lazy_members()
if not self._loaded:
defaults = self.default_members()
for key in defaults:
if key in self._members:
self._members[key].extend(defaults[key])
else:
self._members[key] = defaults[key]
self._loaded = True
return self._members
def default_members(self):
return {}
class SettingRegistry(DictRegistry):
"""Stores key/value settings that code can use to customize Glue
Each member is a tuple of 3 items:
- key: the setting name [str]
- value: the default setting [object]
- validator: A function which tests whether the input is a valid value,
and raises a ValueError if invalid. On valid input,
returns the (possibly sanitized) setting value.
"""
def __init__(self):
super(SettingRegistry, self).__init__()
self._validators = {}
self._defaults = {}
def add(self, key, default=None, validator=None):
if validator is None:
validator = lambda x: x
self._defaults[key] = validator(default)
self._validators[key] = validator
def __getattr__(self, attr):
if attr.startswith('_'):
raise AttributeError("No such setting: {0}".format(attr))
else:
if attr in self._members:
return self._members[attr]
elif attr in self._defaults:
return self._defaults[attr]
else:
raise AttributeError("No such setting: {0}".format(attr))
def __setattr__(self, attr, value):
if attr.startswith('_'):
object.__setattr__(self, attr, value)
elif attr in self:
self._members[attr] = self._validators[attr](value)
else:
raise AttributeError("No such setting: {0}".format(attr))
def __dir__(self):
return sorted(self._members.keys())
def __contains__(self, setting):
return setting in self._defaults
def __iter__(self):
for key in self._defaults:
value = self._members.get(key, self._defaults[key])
yield key, value, self._validators[key]
def reset_defaults(self):
self._members.clear()
def is_default(self, setting):
return setting in self._defaults and not setting in self._members
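# Hedged sketch (not part of the original module): registering and overriding a
# setting; 'MAX_POINTS' is an invented key, not a real glue setting.
# >>> s = SettingRegistry()
# >>> s.add('MAX_POINTS', 1000, validator=int)
# >>> s.MAX_POINTS
# 1000
# >>> s.MAX_POINTS = '2000'   # validated (and coerced) on assignment
# >>> s.MAX_POINTS
# 2000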
class QGlueParserRegistry(Registry):
"""
Registry for parsers that can be used to interpret arguments to the
:func:`~glue.qglue` function.
The members property is a list of parsers, each represented as a named tuple
with ``data_class``, ``parser`` and ``priority`` attributes, where ``data_class``
defines the class for which to use the parser, and ``parser`` is a function
that takes the input data and returns a list of glue
:class:`~glue.core.Data` objects. The ``parser`` functions should take two
arguments: the variable containing the data being parsed, and a label. In
addition, the priority (defaulting to 0) can be specified in case one wants
to make sure sub-classes get tested before more general classes. The
priority should be a numerical value, and the larger it is the higher the
priority.
"""
item = namedtuple('DataFactory', 'data_class parser priority')
def add(self, data_class, parser, priority=0):
"""
Add a new parser
Parameters
----------
data_class : class
The type of data for which to use the specified parser
parser : func
The function to use to parse the input data
priority : int, optional
The priority, which is used to determine the order in which to check
the parsers.
"""
self.members.append(self.item(data_class, parser, priority))
def __call__(self, data_class, priority=0):
def adder(func):
if isinstance(data_class, tuple):
for dc in data_class:
self.add(dc, func, priority=priority)
else:
self.add(data_class, func, priority=priority)
return func
return adder
def __iter__(self):
for member in sorted(self.members, key=lambda x: -x.priority):
yield member
class DataImportRegistry(Registry):
"""
Stores functions which can import data.
The members property is a list of importers, each represented as a
``(label, load_function)`` tuple. The ``load_function`` should take no
arguments and return a list of :class:`~glue.core.data.Data` objects.
"""
def add(self, label, importer):
"""
Add a new importer
:param label: Short label for the importer
:type label: str
:param importer: importer function
:type importer: function()
"""
self.members.append((label, importer))
def __call__(self, label):
def adder(func):
self.add(label, func)
return func
return adder
class MenubarPluginRegistry(Registry):
"""
Stores menubar plugins.
The members property is a list of menubar plugins, each represented as a
``(label, function)`` tuple. The ``function`` should take two items which
are a reference to the session and to the data collection respectively.
"""
def add(self, label, function):
"""
Add a new menubar plugin
:param label: Short label for the plugin
:type label: str
:param function: function
:type function: function()
"""
self.members.append((label, function))
def __call__(self, label):
def adder(func):
self.add(label, func)
return func
return adder
class PreferencePanesRegistry(DictRegistry):
"""
Stores preference panes
The members property is a list of tuples of Qt widget classes that can have
their own tab in the preferences window.
"""
def add(self, label, widget_cls):
self._members[label] = widget_cls
def __iter__(self):
for label in self._members:
yield label, self._members[label]
class ExporterRegistry(Registry):
"""Stores functions which can export an applocation to an output file
The members property is a list of exporters, each represented
as a (label, save_function, can_save_function, outmode) tuple.
save_function takes an (application, path) as input, and saves
the session
can_save_function takes an application as input, and raises an
exception if saving this session is not possible
outmode is a string, with one of 3 values:
'file': indicates that exporter creates a file
'directory': exporter creates a directory
'label': exporter doesn't write to disk, but needs a label
"""
def add(self, label, exporter, checker, outmode=None):
"""
Add a new exporter
Parameters
----------
label : str
Short label for the exporter
exporter : func
Exporter function which takes two arguments: the application and
optionally the path or label to create. This function should raise
an exception if export isn't possible.
checker : func
Function that checks if saving is possible, which takes one
argument: the application.
outmode : str or `None`
Indicates what kind of output is created. This can be either set to
``'file'``, ``'directory'``, ``'label'``, or `None`.
"""
self.members.append((label, exporter, checker, outmode))
class ColormapRegistry(Registry):
"""Stores colormaps for the Image Viewer. The members property is
a list of colormaps, each represented as a [name,cmap] pair.
"""
def default_members(self):
import matplotlib.cm as cm
members = []
members.append(['Gray', cm.gray])
members.append(['Purple-Blue', cm.PuBu])
members.append(['Yellow-Green-Blue', cm.YlGnBu])
members.append(['Yellow-Orange-Red', cm.YlOrRd])
members.append(['Red-Purple', cm.RdPu])
members.append(['Blue-Green', cm.BuGn])
members.append(['Hot', cm.hot])
members.append(['Red-Blue', cm.RdBu])
members.append(['Red-Yellow-Blue', cm.RdYlBu])
members.append(['Purple-Orange', cm.PuOr])
members.append(['Purple-Green', cm.PRGn])
return members
def add(self, label, cmap):
"""
Add colormap *cmap* with label *label*.
"""
self.members.append([label, cmap])
class DataFactoryRegistry(Registry):
"""Stores data factories. Data factories take filenames as input,
and return :class:`~glue.core.data.Data` instances
The members property returns a list of (function, label, identifier,
priority) namedtuples:
- Function is the factory that creates the data object
- label is a short human-readable description of the factory
- identifier is a function that takes ``(filename, **kwargs)`` as input
and returns True if the factory can open the file
- priority is a numerical value that indicates how confident the data
factory is that it should read the data, relative to other data
factories. For example, a highly specialized FITS reader for specific
FITS file types can be given a higher priority than the generic FITS
reader in order to take precedence over it.
New data factories can be registered via::
@data_factory('label_name', identifier=identifier, priority=10)
def new_factory(file_name):
...
If not specified, the priority defaults to 0.
"""
item = namedtuple('DataFactory', 'function label identifier priority deprecated')
def __call__(self, label, identifier=None, priority=None, default='', deprecated=False):
if identifier is None:
identifier = lambda *a, **k: False
if priority is None:
if deprecated:
priority = -1000
else:
priority = 0
def adder(func):
self.add(self.item(func, label, identifier, priority, deprecated))
return func
return adder
def __iter__(self):
for member in sorted(self.members, key=lambda x: (-x.priority, x.label)):
yield member
class DataExporterRegistry(Registry):
"""
Stores data exporters. Data exporters take a data/subset object as input
followed by a filename.
"""
item = namedtuple('DataFactory', 'function label extension')
def __call__(self, label, extension=[]):
def adder(func):
self.add(self.item(func, label, extension))
return func
return adder
def __iter__(self):
for member in sorted(self.members, key=lambda x: x.label):
yield member
class QtClientRegistry(Registry):
"""
Stores QT widgets to visualize data.
The members property is a list of Qt widget classes
New widgets can be registered via::
@qt_client
class CustomWidget(QMainWindow):
...
"""
class ViewerToolRegistry(DictRegistry):
def add(self, tool_cls):
"""
Add a tool class to the registry. The ``tool_id`` attribute on the
tool_cls should be set, and is used by the viewers to indicate which
tools they want to use.
"""
if tool_cls.tool_id in self.members:
raise ValueError("Tool ID '{0}' already registered".format(tool_cls.tool_id))
else:
self.members[tool_cls.tool_id] = tool_cls
def __call__(self, tool_cls):
self.add(tool_cls)
return tool_cls
class LinkFunctionRegistry(Registry):
"""Stores functions to convert between quantities
The members property is a list of (function, info_string,
output_labels) namedtuples. ``info_string`` describes what the
function does. ``output_labels`` is a list of names for each output.
``category`` is a category in which the link function will appear (defaults
to 'General').
New link functions can be registered via
@link_function(info="maps degrees to arcseconds",
output_labels=['arcsec'])
def degrees2arcsec(degrees):
return degrees * 3600
Link functions are expected to receive and return numpy arrays
"""
item = namedtuple('LinkFunction', 'function info output_labels category')
def __call__(self, info="", output_labels=None, category='General'):
out = output_labels or []
def adder(func):
self.add(self.item(func, info, out, category))
return func
return adder
class SingleSubsetLayerActionRegistry(Registry):
""" Stores custom menu actions available when user selects a single
subset in the data collection view
The members property is a list of (label, tooltip, callback)
tuples. callback is a function that takes a Subset and DataCollection
as input
"""
item = namedtuple('SingleSubsetLayerAction', 'label tooltip callback icon')
def __call__(self, label, callback, tooltip=None, icon=None):
self.add(self.item(label, callback, tooltip, icon))
class LinkHelperRegistry(Registry):
"""Stores helper objects that compute many ComponentLinks at once
The members property is a list of (object, info_string,
input_labels) tuples. `Object` is the link helper. `info_string`
describes what `object` does. `input_labels` is a list labeling
the inputs. ``category`` is a category in which the link function will appear
(defaults to 'General').
Each link helper takes a list of ComponentIDs as inputs, and
returns an iterable object (e.g. list) of ComponentLinks.
New helpers can be registered via
@link_helper('Links degrees and arcseconds in both directions',
['degree', 'arcsecond'])
def new_helper(degree, arcsecond):
return [ComponentLink([degree], arcsecond, using=lambda d: d*3600),
ComponentLink([arcsecond], degree, using=lambda a: a/3600)]
"""
item = namedtuple('LinkHelper', 'helper info input_labels category')
def __call__(self, info, input_labels, category='General'):
def adder(func):
self.add(self.item(func, info, input_labels, category))
return func
return adder
class ProfileFitterRegistry(Registry):
item = namedtuple('ProfileFitter', 'cls')
def add(self, cls):
"""
Add a profile fitter class to the registry.
"""
self.members.append(cls)
def default_members(self):
from glue.core.fitters import __FITTERS__
return list(__FITTERS__)
class BooleanSetting(object):
def __init__(self, default=True):
self.state = default
def __call__(self, state=None):
if state not in [None, True, False]:
raise ValueError("Invalid True/False setting: %s" % state)
if state is not None:
self.state = state
return self.state
qt_client = QtClientRegistry()
viewer_tool = ViewerToolRegistry()
data_factory = DataFactoryRegistry()
data_exporter = DataExporterRegistry()
link_function = LinkFunctionRegistry()
link_helper = LinkHelperRegistry()
colormaps = ColormapRegistry()
importer = DataImportRegistry()
exporters = ExporterRegistry()
settings = SettingRegistry()
fit_plugin = ProfileFitterRegistry()
single_subset_action = SingleSubsetLayerActionRegistry()
menubar_plugin = MenubarPluginRegistry()
preference_panes = PreferencePanesRegistry()
qglue_parser = QGlueParserRegistry()
# watch loaded data files for changes?
auto_refresh = BooleanSetting(False)
enable_contracts = BooleanSetting(False)
def load_configuration(search_path=None):
''' Find and import a config.py file
Returns:
The module object
Raises:
Exception, if no module was found
'''
search_order = search_path or _default_search_order()
result = imp.new_module('config')
for config_file in search_order:
dir = os.path.dirname(config_file)
try:
sys.path.append(dir)
config = imp.load_source('config', config_file)
result = config
except IOError:
pass
except Exception as e:
raise type(e)("Error loading config file %s:\n%s" % (config_file, e), sys.exc_info()[2])
finally:
sys.path.remove(dir)
return result
def _default_search_order():
"""
The default configuration file search order:
* current working directory
* environ var GLUERC
* HOME/.glue/config.py
* Glue's own default config
"""
from glue import config
search_order = [os.path.join(os.getcwd(), 'config.py')]
if 'GLUERC' in os.environ:
search_order.append(os.environ['GLUERC'])
search_order.append(os.path.join(config.CFG_DIR, 'config.py'))
return search_order[::-1]
###### Now define global settings ######
GRAY = '#7F7F7F'
BLUE = "#1F78B4"
GREEN = "#33A02C"
RED = "#E31A1C"
ORANGE = "#FF7F00"
PURPLE = "#6A3D9A"
YELLOW = "#FFFF99"
BROWN = "#8C510A"
PINK = "#FB9A99"
LIGHT_BLUE = "#A6CEE3"
LIGHT_GREEN = "#B2DF8A"
LIGHT_RED = "#FB9A99"
LIGHT_ORANGE = "#FDBF6F"
LIGHT_PURPLE = "#CAB2D6"
settings.add('SUBSET_COLORS', [RED, GREEN, BLUE, BROWN, ORANGE, PURPLE, PINK], validator=list)
settings.add('DATA_COLOR', '0.35')
settings.add('DATA_ALPHA', 0.8, validator=float)
settings.add('BACKGROUND_COLOR', '#FFFFFF')
settings.add('FOREGROUND_COLOR', '#000000')
settings.add('SHOW_LARGE_DATA_WARNING', True, validator=bool)
| {
"repo_name": "saimn/glue",
"path": "glue/config.py",
"copies": "1",
"size": "21193",
"license": "bsd-3-clause",
"hash": -358768000017613400,
"line_mean": 30.2581120944,
"line_max": 100,
"alpha_frac": 0.6271410371,
"autogenerated": false,
"ratio": 4.2411446868120874,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00034618443304354615,
"num_lines": 678
} |
from __future__ import (absolute_import, division, print_function)
import os
import importlib
import time
import glob
import numpy as np
from copy import copy
import astropy.io.fits as astropy_fitsio
from astropy.table import Table, Column
import astropy.table
from desitarget.targetmask import desi_mask
from desitarget.mock.io import decode_rownum_filenum
############################################################
def features_parse(features):
"""
Extracts all valid lines in fiberassign features file to strings (i.e. no
automatic processing of values to appropriate types).
Args:
features: path to a features file
Returns:
dict with keys and values as features file.
"""
d = dict()
with open(features,'r') as f:
for line in f.readlines():
if line.startswith('-'): break
if line.startswith('#'): continue
try:
w = line.index(' ')
except ValueError:
continue
k = line[0:w]
v = line[w+1:].strip()
d[k] = v
return d
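# Hedged sketch (not part of the original module): parsing a made-up features
# file; the keys shown are invented and not real fiberassign options.
# >>> import tempfile
# >>> with tempfile.NamedTemporaryFile('w', suffix='.txt', delete=False) as f:
# ...     _ = f.write('# comment\nTilefile my_tiles.fits\nMinGap 0.2\n')
# >>> features_parse(f.name) == {'Tilefile': 'my_tiles.fits', 'MinGap': '0.2'}
# True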
############################################################
def make_mtls_for_sources(source_defs,output_dir,reset=False):
"""
Writes 'intermediate' target list (TL) files that gather data from the
input mocks using the routines in desitarget.mocks (which include target
selection).
Args:
sources: dict of source definitions.
output_dir: location for intermediate mtl files.
reset: If True, force all intermediate TL files to be remade.
Returns:
targets_all: list, target files for each source.
truth_all: list, truth files for each source.
sources_all: list, table of source file locations for each source.
The keys in sources must correspond to the names of modules in
desitarget.mock. These modules must define a function build_mock_target
that can generate target and truth files for that mock.
These intermediate TLs contain all targets from a mock in one file and have
a format similar to the final fiberassign MTL, but they are not suitable
for input to fiberassign on their own.
These might well be cached on disk after they're made, but that's left to
the desitarget.mock reading routines.
"""
targets_all = list()
truth_all = list()
sourcefiles_all = list()
print('The following mock catalog sources are specified:')
for source_name in sorted(source_defs.keys()):
print('{}'.format(source_name))
# Iterate over sources in predictable order to generate data (or read from
# cached files)
for source_name in sorted(source_defs.keys()):
module_name = 'desitarget.mock.{}'.format(source_name)
print('')
print('Reading mock {}'.format(source_name))
print('Using module {}'.format(module_name))
M = importlib.import_module(module_name)
t0 = time.time()
mock_kwargs = copy(source_defs[source_name])
mock_kwargs['output_dir'] = output_dir
mock_kwargs['write_cached_targets'] = True
mock_kwargs['remake_cached_targets'] = reset
targets, truth, sourcefiles = M.build_mock_target(**mock_kwargs)
t1 = time.time()
targets_all.append(targets)
truth_all.append(truth)
sourcefiles_all.append(sourcefiles)
print('Data read for mock {}, took {:f}s'.format(source_name,t1-t0))
print('')
return targets_all, truth_all, sourcefiles_all
############################################################
def combine_targets_truth(input_target_data, input_truth_data, input_sources=None):
"""
Combines target and truth table in memory.
Creates one target table and one truth table out of a set of input files. Each
input is assumed to be a valid target or truth table in its own right.
Args:
input_target_data : list of arrays or Tables of targets
input_truth_data : list of arrays or Tables of truth
input_sources : optional, list of source file Tables
Returns:
combination_dict : dict with the following keys:
targets: concatenation of input target tables
truth: concatenation of input truth tables
(and the following only if sources != None)
sources: concatenation of input source lists
sources_meta: concatenation of input source lists
"""
print('Combining {:d} target, {:d} truth tables'.format(len(input_target_data),len(input_truth_data)))
combination_dict = dict()
# Sanity check: all inputs have to have signed int64 dtype for TARGETID
# column otherwise the stacking turns this column into float64.
for i,t in enumerate(input_target_data):
if not isinstance(t,Table):
raise Exception('Input #{} not a table'.format(i))
if not t['TARGETID'].dtype.type == np.dtype('int64').type:
raise Exception('Invalid TARGETID dtype {} for input #{}'.format(t['TARGETID'].dtype,i))
print(' -- {:4d} : {:12d} rows'.format(i,len(t)))
# Combine TARGET tables
#if len(input_target_data) > 1:
master_target_table = astropy.table.vstack(input_target_data)
#else:
# master_target_table = input_target_data[0]
total_targets = len(master_target_table)
print('Total : {:12d} rows'.format(total_targets))
# Combine TRUTH tables
#if len(input_truth_data) > 1:
master_truth_table = astropy.table.vstack(input_truth_data)
#else:
# master_truth_table = input_truth_data[0]
if (len(master_truth_table) != total_targets):
raise Exception('%d rows in final target table but %d rows in final truth table'%(total_targets,len(master_truth_table)))
# Verify correct targetid column
assert(master_target_table['TARGETID'].dtype.type == np.dtype('int64').type)
assert(master_truth_table['TARGETID'].dtype.type == np.dtype('int64').type)
combination_dict['targets'] = master_target_table
combination_dict['truth'] = master_truth_table
# Propagate source lists from SOURCE extension of input truth tables.
if input_sources is not None:
# if len(input_sources) > 1:
master_source_table = astropy.table.vstack(input_sources)
# else:
# master_source_table = input_sources[0]
total_rows_in_source_table = np.sum(master_source_table['NROWS'],dtype=np.int64)
sources_meta = dict()
sources_meta['NSELECTED'] = list()
sources_meta['NSOURCES'] = list()
for i in range(0,len(input_sources)):
sources_meta['NSELECTED'].append(len(input_target_data[i]))
sources_meta['NSOURCES'].append(len(input_sources[i]))
sources_meta = Table(sources_meta)
# Note that the source files will have more rows than go into the MTL,
# because only a fraction are selected as targets. Hence this assert
# will fail:
# assert(total_rows_in_source_table == total_targets)
combination_dict['sources'] = master_source_table
combination_dict['sources_meta'] = sources_meta
return combination_dict
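# Hedged sketch (not part of the original module): combining two minimal
# target/truth tables (informational prints omitted).  Only TARGETID is checked
# here; real tables carry many more columns.
# >>> t1 = Table({'TARGETID': np.array([0, 1], dtype=np.int64)})
# >>> t2 = Table({'TARGETID': np.array([2], dtype=np.int64)})
# >>> combined = combine_targets_truth([t1, t2], [t1.copy(), t2.copy()])
# >>> len(combined['targets']), len(combined['truth'])
# (3, 3)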
############################################################
def make_catalogs_for_source_files(fa_output_dir,
input_mtl,input_truth,
catalog_path,
fa_output_base='tile_',
tilefile=None,
reset=False):
"""
Takes the fibermap fits files output by fiberassign (one per tile) and
creates a catalog row-matched to the original mock from which targets
were selected when creating the MTL.
Args:
catalog_path : Path to a fiber_to_mtl.fits catalog.
fa_output_dir : fiberassign output directory
fa_output_base : optional, basename of fiberassign output files
The output recreates the full directory structure under the directory of
catalog_path. So if a mock file is found under /path/to/mock/file_1.fits,
the corresponding assignment catalog will appear under
$catalog_path/path/to/mock/file_1_FA.fits.
Notes:
Here's a diagram of a loop representing the analysis of fiber assignment
given some mock catalogues, stages A-E:
E-> A. Multiple mock sources, multiple mock files per source
| |
| v (target selection)
| B. Target list (TL) files (one per mock source)
| |
| v (make_mtl)
| C. Single MTL and Truth files
| |
| v (fiberassign)
| D. FA tilemap files (assignments and potential assignments)
|___|
The idea of this script (E) is to map D back to A, using information
propagated through C and B.
Matching arbitrary TARGETIDS is slow (not least because it requires the
input mock to be read). The alternative approach here assumes that the
TARGETIDs assigned in the TL files (created in stage B) encode the row
and filenumber of each selected target in the original mock files. The
filenumber is defined by the read order of the files, which is
propagated through the headers of the TL Truth and the MTL Truth.
desitarget.mtl.make_mtl overwrites the TL TARGETIDS when it creates the
MTL (stage C). The scheme for doing this is still undecided. Here I've
assumed that the MTL TARGETIDS (for targets read from the input files,
as opposed to skys and standards) simply correspond to row numbers in
the MTL itself. These are the TARGETIDS in the fiber maps created in
stage D.
NOTE: the step B->C potentially involves the omission of rows
corresponding to targets that are selected, but not observed (e.g.
N_OBS = 0). This isn't handled at the moment, so trim=False is required
in make_mtl.
Assumptions:
The TARGETIDs in B are stored for rows in C as ORIGINAL_TARGETID in
the truth table. This covers the possibility that MTL TARGETIDS
encode something else.
"""
# Pattern to extract tile number from file name, not strictly required to
# do this since tile number is stored in header of tile file.
tile_regex = '%s([0-9]+).fits'%(fa_output_base)
fa_output_pattern = os.path.join(fa_output_dir,fa_output_base + '[0-9]*.fits')
fa_output_files = glob.glob(fa_output_pattern)
# Make/get the table describing observations of each target in MTL order
# (the catalog)
if reset or not(os.path.exists(catalog_path)):
print('Gathering fiber maps to MTL order...')
t = reduce_fiber_maps_to_mtl(fa_output_files,input_mtl,catalog_path,tilefile=tilefile)
t.write(catalog_path,overwrite=True)
else:
print('Reading fiber map in MTL order...')
t = Table.read(catalog_path)
nrows_mtl = len(t)
# Now expand the MTL-like fiber map out to many files row-matched to the
# individual mock input files, including rows corresponding to targets that
# were not selected or were selected but not assigned fibers.
# First read the original target ids from the MTL input files, then use
# those to recreate the mock brick order.
truth_fits = astropy_fitsio.open(input_truth)
original_targetid = truth_fits['TRUTH'].data['ORIGINAL_TARGETID']
truth_fits.close()
# Read the TRUTH source list from the header
source_list = Table.read(input_truth,hdu='SOURCES')
source_meta = Table.read(input_truth,hdu='SOURCEMETA')
# Select blocks of rows corresponding to each source file
n_mtl_rows_processed = 0
n_sources_processed = 0
for iinput in range(0,len(source_meta)):
n_sources_this_input = source_meta['NSOURCES'][iinput]
n_mtl_rows_this_input = source_meta['NSELECTED'][iinput]
print('Source {:d} has {:d} input files and {:d} rows in the MTL'.format(iinput, n_sources_this_input, n_mtl_rows_this_input))
o_s = n_sources_processed
l_s = n_sources_this_input
sources_this_input = source_list[o_s:o_s+l_s]
# Expand original targetids from truth file. These are assumed to
# encode the row and file number in the original mock file.
o = n_mtl_rows_processed
l = n_mtl_rows_this_input
irow, ifile = decode_rownum_filenum(original_targetid[o:o+l])
# Loop over original mock files.
print('Looping over {:d} original mock files'.format(l_s))
for ioriginal,original_file in enumerate(sources_this_input['FILE']):
# print('Original file: %s'%(original_file))
# FIXME having to use 1 for the extension here, no extname
original_nrows = astropy_fitsio.getheader(original_file,1)['NAXIS2']
# Select rows in this mock file.
w = np.where(ifile == ioriginal)[0]
assert(len(w) > 0)
assert(np.all(irow[w]>=0))
# Make a table row-matched to the specific source file.
source_table = Table()
column_name_remap = {'TARGETID': 'MTL_TARGETID'}
for colname in t.colnames:
c = t[colname]
column_name = column_name_remap.get(c.name,c.name)
source_table.add_column(Column(np.repeat(-1,original_nrows),
name = column_name,
dtype = c.dtype))
source_table[column_name][irow[w]] = t[c.name][o:o+l][w]
catalog_dir = os.path.split(catalog_path)[0]
original_file_path, original_file_name = os.path.split(original_file)
new_source_file_name = original_file_name.replace('.fits','_FA.fits')
new_source_file_dir = os.path.join(catalog_dir,*original_file_path.split(os.path.sep))
new_source_file_path = os.path.join(new_source_file_dir,new_source_file_name)
try:
os.makedirs(new_source_file_dir)
except OSError as e:
if e.errno == 17: # File exists
pass
# print('Writing output: %s'%(new_source_file_path))
source_table.write(new_source_file_path,overwrite=True)
# Increment index in the MTL
n_mtl_rows_processed += n_mtl_rows_this_input
# Increment index in the list of sources
n_sources_processed += l_s
return
############################################################
def reduce_fiber_maps_to_mtl(fa_output_files,input_mtl,output_dir,tilefile=None):
"""
Reads all the FA output files and creates a table of assignments and
potential assignments row-matched to the input MTL. Assumes that TARGETID
for targets read from the MTL is the row index in the MTL -- hence no need
to match TARGETIDS explicitly.
Args:
input_mtl : location of MTL file fed to FA
output_dir : directory to write the catalog output
"""
# Get number of rows in original MTL
mtl_header = astropy_fitsio.getheader(input_mtl,ext=1)
nrows_mtl = mtl_header['NAXIS2']
# Read all the FA files
fa_output_all = list()
fa_potential_all = list()
tileids_in_read_order = list()
n_per_tile_all = list()
n_potential_per_tile_all = list()
print('Reading {:d} fiberassign tile outputs'.format(len(fa_output_files)))
for fa_file in sorted(fa_output_files):
# Read assignments and list of potential assignments
fa_this_tile, fa_header_this_tile = astropy_fitsio.getdata(fa_file,
memmap = False,
ext = ('FIBER_ASSIGNMENTS',1),
header = True)
fa_potential_this_tile = astropy_fitsio.getdata(fa_file,
memmap = False,
ext =('POTENTIAL_ASSIGNMENTS',1))
tileid = fa_header_this_tile['TILEID']
n_this_tile = len(fa_this_tile['TARGETID'])
n_potential_this_tile = len(fa_potential_this_tile)
tileids_in_read_order.append(tileid)
n_per_tile_all.append(n_this_tile)
n_potential_per_tile_all.append(n_potential_this_tile)
fa_output_all.append(fa_this_tile)
fa_potential_all.append(fa_potential_this_tile)
# Sanity checks
unique_tile_ids = np.unique(tileids_in_read_order)
assert(len(unique_tile_ids) == len(tileids_in_read_order))
# Merge all the tiles and convert to Table
print('Concatenating fiberassign tables...')
fa_output = Table(np.concatenate(fa_output_all))
fa_potential = Table(np.concatenate(fa_potential_all))
# Add tileid as a column
tileid = np.repeat(tileids_in_read_order,n_per_tile_all)
fa_output.add_column(Column(tileid,name='TILEID'))
tileid_potential = np.repeat(tileids_in_read_order,n_potential_per_tile_all)
fa_potential.add_column(Column(tileid_potential,name='TILEID'))
# Get number of passes from tile file (sadly not stored in header)
print('Reading tile data: %s'%(tilefile))
tile_data = Table.read(tilefile)
pass_of_tile = list()
# Assign pass to each tileid (note that there are as many tileids as input
# files).
for tileid in tileids_in_read_order:
i = np.where(tile_data['TILEID'] == tileid)[0]
pass_of_tile.append(tile_data['PASS'][i])
pass_of_tile = np.array(pass_of_tile)
unique_pass = np.unique(pass_of_tile)
npass = len(unique_pass)
print('Have {:d} tiles, {:d} passes'.format(len(tileids_in_read_order),npass))
# Add pass as a column
ipass = np.repeat(pass_of_tile,n_per_tile_all)
fa_output.add_column(Column(ipass,name='PASS'))
ipass_potential = np.repeat(pass_of_tile,n_potential_per_tile_all)
fa_potential.add_column(Column(ipass_potential,name='PASS'))
# The POTENTIALTARGETID column contains conflicts with stds/skys as well as
# input targets.
# We need to know which potential targets correspond to which entries in
# the fibre map. Do this by brute force for now, adding the targetid of the
# assigned target against each entry in the potential list corresponding to
# its fiber.
parent_targetid = np.repeat(fa_output['TARGETID'], fa_output['NUMTARGET'])
parent_desi_target = np.repeat(fa_output['DESI_TARGET'],fa_output['NUMTARGET'])
assert(len(parent_targetid) == len(fa_potential))
fa_potential.add_column(Column(parent_targetid, dtype=np.int64,name='PARENT_TARGETID'))
fa_potential.add_column(Column(parent_desi_target,dtype=np.int64,name='PARENT_DESI_TARGET'))
# Separate targets that were in the standard or sky MTL rather than the
# main target MTL. Also have unassigned fibres to deal with.
is_free_fiber = fa_output['TARGETID'] < 0
is_sky = (fa_output['DESI_TARGET'] & desi_mask.SKY) != 0
is_std_fstar = (fa_output['DESI_TARGET'] & desi_mask.STD_FSTAR) != 0
is_std_wd = (fa_output['DESI_TARGET'] & desi_mask.STD_WD) != 0
is_std_bright = (fa_output['DESI_TARGET'] & desi_mask.STD_BRIGHT) != 0
is_skystdfree = is_sky | is_std_fstar | is_std_wd | is_std_bright | is_free_fiber
# Sanity check targetid values for use as indices
assert(np.all(fa_output['TARGETID'][np.invert(is_skystdfree)] < nrows_mtl))
skystd_output = fa_output[is_skystdfree].copy()
fa_output.remove_rows(is_skystdfree)
# Same again for potential assignments -- remove those whose 'parent'
# fibres (i.e. the fibres assigned to the targets for which they were the
# other candidates) are skys and standards, or free fibres (since free
# fibres can still have potential targets).
is_free_fiber = fa_potential['PARENT_TARGETID'] < 0
is_sky = (fa_potential['PARENT_DESI_TARGET'] & desi_mask.SKY) != 0
is_std_fstar = (fa_potential['PARENT_DESI_TARGET'] & desi_mask.STD_FSTAR) != 0
is_std_wd = (fa_potential['PARENT_DESI_TARGET'] & desi_mask.STD_WD) != 0
is_std_bright = (fa_potential['PARENT_DESI_TARGET'] & desi_mask.STD_BRIGHT) != 0
is_skystdfree = is_sky | is_std_fstar | is_std_wd | is_std_bright | is_free_fiber
# Sanity check targetid values for use as indices
assert(np.all(fa_potential['PARENT_TARGETID'][np.invert(is_skystdfree)] < nrows_mtl))
skystd_potential = fa_potential[is_skystdfree].copy()
fa_potential.remove_rows(is_skystdfree)
# Some science fibres will have potential targets that are skys and
# standards. These need to stay in the list for now so that the lengths of
# entries in the potential list can be used to iterate over it. Also there
# is no way to get the desi_target field for each potential targetid
# without matching.
# Use targetids as indices to reorder the merged list
t = Table()
t.add_column(Column(np.repeat(-1,nrows_mtl), dtype=np.int64, name='TARGETID'))
# FIXME relies on TARGETID being an index
# Assume we can use targetid as an index
print('reduce_fiber_maps_to_mtl(): WARNING: ASSUMING TARGETID IN FIBER MAP IS MTL ROWNUMBER')
row_in_input = fa_output['TARGETID']
# Copy data for assigned targets. This would be trivial if targets were
# only assigned once, but that's not the case.
unique_rows, primary_row, nobs = np.unique(row_in_input,return_index=True,return_counts=True)
# Copy primary rows for the target list.
# t[row_in_input[primary_row]] = fa_output[primary_row]
t['TARGETID'][row_in_input[primary_row]] = fa_output['TARGETID'][primary_row]
# Copy primary rows for the potential targetid list. Beware that this list
# still contains non-science targets that originate outside the target MTL.
# FIXME relies on TARGETID being an index to filter these out
print('reduce_fiber_maps_to_mtl(): WARNING: ASSUMING TARGETID IN FIBER MAP IS MTL ROWNUMBER')
potential_row_in_input = fa_potential['POTENTIALTARGETID']
is_row_in_mtl = np.where(potential_row_in_input < nrows_mtl)[0]
unique_potential_rows, potential_primary_row, npotential_primary = np.unique(potential_row_in_input[is_row_in_mtl],
return_index=True,return_counts=True)
# Many targets will be in the possible list but not the assigned list, so
# need to set their target numbers. Do this only for potential targets that
# originate from the MTL.
t['TARGETID'][potential_row_in_input[is_row_in_mtl[potential_primary_row]]] = fa_potential['POTENTIALTARGETID'][is_row_in_mtl[potential_primary_row]]
# Also find unique PARENT values of each potential target. These only
# contain targetids that are valid as indices to the MTL. No need to set
# the output targetids using these since they're all in the target list
# above anyway.
potential_parent_row_in_input = fa_potential['PARENT_TARGETID']
unique_potential_parent_rows, potential_parent_primary_row, npotential_parent = np.unique(potential_parent_row_in_input,return_index=True,return_counts=True)
# Issues
# - this assumes FA is run with tiles from multiple passes
# - some targets will be available to multiple fibres on one tile
# Need to know:
# NOBS Number of times each target is observed (as zcat)
t.add_column(Column(np.zeros(len(t),dtype=np.int32), name='NOBS'))
t['NOBS'][row_in_input[primary_row]] = nobs
# NPOSSIBLE Number of fibres that could ever reach this target on any pass
t.add_column(Column(np.zeros(len(t),dtype=np.int32), name='NPOSSIBLE'))
t['NPOSSIBLE'][potential_row_in_input[is_row_in_mtl[potential_primary_row]]] = npotential_primary
# Add columns per-pass
for i in range(0,npass):
ipass = unique_pass[i]
assert(ipass >= 0)
# Which assigned targets have tiles in this pass?
tiles_this_pass = np.where(fa_output['PASS'][primary_row] == ipass)[0]
assert(len(tiles_this_pass) > 0)
# Store tile if target was assigned on corresponding pass or -1 if not assigned.
colname = 'TILEID_P{:d}'.format(ipass)
t.add_column(Column(np.zeros(len(t),dtype=np.int32)-1,name=colname))
t[colname][row_in_input[primary_row[tiles_this_pass]]] = fa_output['TILEID'][primary_row[tiles_this_pass]]
# NALTERNATIVE Number of other targets available to the fibre of an assigned target
# (equal to the number of potential targets with this target as their primary)
# -1 for targets that were not assigned
colname = 'NALTERNATIVE_P{:d}'.format(ipass)
t.add_column(Column(np.zeros(len(t),dtype=np.int32)-1,name=colname))
t[colname][row_in_input[primary_row[tiles_this_pass]]] = fa_output['NUMTARGET'][primary_row[tiles_this_pass]]
# Which potential targets have tiles in this pass?
# Don't just use primary rows for this, since the primary row will only
# be associated with one pass. We want the duplicates on other passes.
# tiles_potential_this_pass = np.where(fa_potential['PASS'][is_row_in_mtl[potential_primary_row]] == ipass)[0]
tiles_potential_this_pass = np.where(fa_potential['PASS'][is_row_in_mtl] == ipass)[0]
assert(len(tiles_potential_this_pass) > 0)
# Store tile if target was considered on corresponding pass or -1 if
# not considered. Many fibres can consider the same target.
colname = 'TILEID_POSSIBLE_P{:d}'.format(ipass)
#tileid_potential_this_pass = fa_potential['TILEID'][is_row_in_mtl[potential_primary_row[tiles_potential_this_pass]]]
tileid_potential_this_pass = fa_potential['TILEID'][is_row_in_mtl[tiles_potential_this_pass]]
t.add_column(Column(np.zeros(len(t),dtype=np.int32)-1,name=colname))
t[colname][potential_row_in_input[is_row_in_mtl[tiles_potential_this_pass]]] = tileid_potential_this_pass
# Any target assigned on this pass should be a potential target in this
# pass.
is_assigned_this_pass = t['TILEID_P{:d}'.format(ipass)][row_in_input[primary_row[tiles_this_pass]]] >= 0
is_potential_this_pass = t['TILEID_POSSIBLE_P{:d}'.format(ipass)][row_in_input[primary_row[tiles_this_pass]]] >= 0
if np.any(is_assigned_this_pass & (~is_potential_this_pass)):
raise Exception('Targets are assigned but not possible!')
# Not implemented yet
# NFIBSCANREACH Number of fibres that could have assigned each target on this pass
#colname = 'NFIBSCANREACH_P{:d}'.format(ipass)
#t.add_column(Column(np.zeros(len(t),dtype=np.int32)-1,name=colname))
return t
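# ---------------------------------------------------------------------------
# Editor's notes (hedged, not part of the original module).
#
# 1) The "primary row" bookkeeping above leans on np.unique with
#    return_index/return_counts. A tiny standalone illustration:
#
#       import numpy as np
#       row_in_input = np.array([3, 1, 3, 3, 7])
#       uniq, first, counts = np.unique(row_in_input,
#                                       return_index=True,
#                                       return_counts=True)
#       # uniq   -> [1, 3, 7]   unique MTL rows that received a fibre
#       # first  -> [1, 0, 4]   index of the first (primary) occurrence
#       # counts -> [1, 3, 1]   how many times each row was assigned (NOBS)
#
# 2) A minimal call sketch; the paths and glob pattern are placeholders, not
#    values taken from this repository:
#
#       import glob
#       fa_files = glob.glob('output/fiberassign/tile_*.fits')
#       summary = reduce_fiber_maps_to_mtl(fa_files,
#                                          input_mtl='input/mtl.fits',
#                                          output_dir='output/catalog',
#                                          tilefile='input/tiles.fits')
#       summary.write('output/catalog/mtl_summary.fits', overwrite=True)
# ---------------------------------------------------------------------------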
| {
"repo_name": "desihub/fiberassign",
"path": "old/py/desitarget.py",
"copies": "1",
"size": "27168",
"license": "bsd-3-clause",
"hash": 5347282858880854000,
"line_mean": 44.8918918919,
"line_max": 161,
"alpha_frac": 0.6337971143,
"autogenerated": false,
"ratio": 3.6748275395644527,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9766713080157475,
"avg_score": 0.00838231474139536,
"num_lines": 592
} |
from __future__ import absolute_import, division, print_function
import os
import json
from glue.core import Subset
DISPATCH = {}
def save_page(page, page_number, label, subset):
""" Convert a tab of a glue session into a D3PO page
:param page: Tuple of data viewers to save
:param label: Tab label
"""
result = {}
# layout settings
result['grid'] = {'nRows': 1, 'nColumns': len(page)}
result['name'] = str(label)
result['caption'] = 'Generated by Glue'
# style settings
d = page[0]._data[0]
unselected = dict(opacity=d.style.alpha,
size=d.style.markersize / 2,
color=d.style.color)
result['markerStyle'] = dict(unselected=unselected)
if subset is not None:
s = subset.style
selected = dict(opacity=s.alpha, size=s.markersize / 2, color=s.color)
result['markerStyle']['selected'] = selected
result['selection'] = {'type': 'booleanColumn',
'columnName': 'selection_%i' % page_number}
result['histogramStyle'] = result['markerStyle']
# save each plot
result['plots'] = list(map(save_plot, page, range(len(page))))
return result
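# Hedged illustration (inferred from the code above, not from the D3PO docs):
# for a one-viewer tab with a subset, the dict returned by save_page() ends
# up shaped roughly like this -- the concrete opacity/size/color values are
# placeholders:
#
#   {
#     "grid": {"nRows": 1, "nColumns": 1},
#     "name": "Tab 1",
#     "caption": "Generated by Glue",
#     "markerStyle": {
#       "unselected": {"opacity": 0.5, "size": 3, "color": "#373737"},
#       "selected":   {"opacity": 0.7, "size": 3, "color": "#e31a1c"}
#     },
#     "selection": {"type": "booleanColumn", "columnName": "selection_0"},
#     "histogramStyle": "... same dict as markerStyle ...",
#     "plots": "... one dict per viewer, see save_scatter/save_histogram ..."
#   }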
def save_plot_base(plot, index):
result = {}
result['gridPosition'] = [0, index]
return result
def save_plot(plot, index):
typ = type(plot)
return DISPATCH[typ](plot, index)
def save_scatter(plot, index):
""" Convert a single glue scatter plot to a D3PO plot
:param plot: Glue scatter plot
:class:`~glue.viewers.scatter.qt.ScatterWidget`
:param index: 1D index of plot on the page
:type index: int
:rtype: json-serializable dict
"""
result = save_plot_base(plot, index)
props = plot.properties
result['type'] = 'scatter'
result['xAxis'] = dict(columnName=props['xatt'].label,
range=[props['xmin'], props['xmax']])
result['yAxis'] = dict(columnName=props['yatt'].label,
range=[props['ymin'], props['ymax']])
# XXX log scales
return result
def save_histogram(plot, index):
""" Convert a single histogram to a D3PO plot
:param plot: Glue histogram
:type plot: :class:`~glue.viewers.histogram.qt.HistogramWidget`
:param index: 1D index of plot on the page
:type index: int
:rtype: json-serializable dict
"""
result = save_plot_base(plot, index)
props = plot.properties
result['type'] = 'histogram'
result['xAxis'] = dict(columnName=props['component'].label,
bins=props['nbins'],
range=[props['xmin'], props['xmax']])
# XXX normed, cumultive, log
return result
def stage_subsets(application):
"""
Return a tuple of the subset to use for each stage/tab,
or None if the tab has no subset
If more than one subset is used per stage/tab, returns None
"""
result = []
for page in application.viewers:
subset = None
for viewer in page:
for layer_artist in viewer.layers:
if not layer_artist.visible:
continue
s = layer_artist.layer
if not isinstance(s, Subset):
continue
if subset is not None and s is not subset:
return None
if subset is None:
subset = s
result.append(subset)
return tuple(result)
def can_save_d3po(application):
"""
Check whether an application can be exported to D3PO.
Raises an exception if not
"""
dc = application.session.data_collection
if len(dc) != 1:
raise ValueError("D3PO Export only supports a single dataset")
data = dc[0]
for tab in application.viewers:
for viewer in tab:
if not isinstance(viewer, tuple(DISPATCH.keys())):
raise ValueError("D3PO Export only supports scatter "
"and histogram plots")
if sum(len(tab) for tab in application.viewers) == 0:
raise ValueError("D3PO Export requires at least one scatterplot "
"or histogram")
if stage_subsets(application) is None:
raise ValueError("D3PO Export restricted to 0 or 1 subsets visible "
"in each tab")
def make_data_file(data, subsets, path):
"""
Create the data.csv file, given Data and tuple of subsets
"""
from astropy.table import Table, Column
data_path = os.path.join(path, 'data.csv')
t = Table([data[c] for c in data.components],
names=[c.label for c in data.components])
for i, subset in enumerate(subsets):
if subset is None:
continue
c = Column(data=subset.to_mask().astype('i'), name='selection_%i' % i)
t.add_column(c)
t.write(data_path, format='ascii', delimiter=',')
def save_d3po(application, path):
"""Save a Glue session to a D3PO bundle.
Currently, this has the following restrictions:
- The Glue session must have only one dataset open, and 0 or 1 subsets
- Only scatter plots or histograms are present
- At least one plot is present
:param application: Glue application to save
:param path: Path to directory to save in. Will be created if needed
"""
if os.path.exists(path) and not os.path.isdir(path):
os.unlink(path)
if not os.path.exists(path):
os.mkdir(path)
data = application.session.data_collection[0]
subsets = stage_subsets(application)
viewers = application.viewers
# data.csv
make_data_file(data, subsets, path)
# states.json
result = {}
result['filename'] = 'data.csv' # XXX don't think this is needed?
result['title'] = "Glue export of %s" % data.label
result['states'] = list(map(save_page, application.viewers,
range(len(viewers)),
application.tab_names,
subsets))
state_path = os.path.join(path, 'states.json')
with open(state_path, 'w') as outfile:
json.dump(result, outfile, indent=2)
# index.html
html_path = os.path.join(path, 'index.html')
with open(html_path, 'w') as outfile:
outfile.write(HTML)
# show the result
launch(path)
def launch(path):
"""Start a server to view an exported D3PO bundle, and open a browser.
:param path: The top-level directory of the bundle
"""
from glue.external.six.moves.socketserver import TCPServer
from glue.external.six.moves.SimpleHTTPServer import SimpleHTTPRequestHandler
from random import randrange
from socket import error
import webbrowser
from threading import Thread
os.chdir(path)
while True:
try:
PORT = randrange(8000, 9000)
server = TCPServer(("", PORT), SimpleHTTPRequestHandler, False)
server.allow_reuse_address = True
server.server_bind()
break
except error: # port already taken
pass
print('Serving D3PO on port 0.0.0.0:%i' % PORT)
server.server_activate()
thread = Thread(target=server.serve_forever)
thread.setDaemon(True) # do not prevent shutdown
thread.start()
webbrowser.open('http://0.0.0.0:%i' % PORT)
def setup():
from glue.config import exporters
exporters.add('D3PO', save_d3po, can_save_d3po, outmode='directory')
HTML = """
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8" />
<link rel="stylesheet" type="text/css" href="http://d3po.org/static/css/style.css">
<link rel="stylesheet" type="text/css" href="http://d3po.org/static/css/d3po.css">
<link href='http://fonts.googleapis.com/css?family=Source+Sans+Pro:100,200,300,400,700' rel='stylesheet' type='text/css'>
<style>
#footer {
position: fixed;
bottom: 0;
right: 0;
}
</style>
<!-- not to be confused with Planet Telex -->
<!-- JavaScript dependencies -->
<script src="http://d3js.org/d3.v3.min.js" charset="utf-8"></script>
<script src="http://d3po.org/static/js/util.js"></script>
<script src="//ajax.googleapis.com/ajax/libs/jquery/1.10.2/jquery.min.js"></script>
<script src="http://d3po.org/static/js/d3po.js"></script>
<script src="http://d3po.org/static/js/d3po.init.js"></script>
</head>
<body>
<div id="svg"><svg></svg></div>
<div id="controls">
<ul class="navigation">
</ul>
</div>
<div id="caption"></div>
<div id="footer">
More information: <a href="http://d3po.org">d3po.org</a>
</div>
<script type="text/javascript">
$(document).ready(function() {
initialize('states.json', 'data.csv');
}
);
</script>
</body>
</html>
"""
try:
from glue.viewers.scatter.qt import ScatterWidget
from glue.viewers.histogram.qt import HistogramWidget
except ImportError:
pass
else:
DISPATCH[ScatterWidget] = save_scatter
DISPATCH[HistogramWidget] = save_histogram
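# Hedged sketch (not part of glue): DISPATCH maps a Qt viewer class to the
# function that serializes it, so supporting another viewer type is a matter
# of registering a new entry. "MyCustomWidget" and save_custom below are
# purely hypothetical names used for illustration.
#
#   def save_custom(plot, index):
#       result = save_plot_base(plot, index)
#       result['type'] = 'scatter'   # D3PO only understands scatter/histogram
#       # ... fill in xAxis/yAxis as in save_scatter ...
#       return result
#
#   DISPATCH[MyCustomWidget] = save_custom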
| {
"repo_name": "saimn/glue",
"path": "glue/plugins/export_d3po.py",
"copies": "1",
"size": "8913",
"license": "bsd-3-clause",
"hash": -7839673022892680000,
"line_mean": 27.5673076923,
"line_max": 121,
"alpha_frac": 0.6159542242,
"autogenerated": false,
"ratio": 3.669411280362289,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9783319380663702,
"avg_score": 0.00040922477971754647,
"num_lines": 312
} |
from __future__ import absolute_import, division, print_function
import os
import json
from itertools import islice
import datashape
from dynd import nd
from collections import Iterator
from datashape.discovery import discover
from datashape import var
from ..utils import partition_all, nth, nth_list, ndget
from .. import compatibility
from ..compatibility import _inttypes, map
from .core import DataDescriptor, isdimension
from .utils import coerce, coerce_row_to_dict, coerce_to_ordered
class JSON(DataDescriptor):
"""
A Blaze data descriptor to expose a JSON file.
Parameters
----------
path : string
A path string for the JSON file.
schema : string or datashape
A datashape (or its string representation) of the schema
in the JSON file.
"""
def __init__(self, path, mode='rt', schema=None, dshape=None, open=open,
**kwargs):
self.path = path
self._abspath = os.path.abspath(path)
self.mode = mode
self.open = open
if dshape:
dshape = datashape.dshape(dshape)
if schema:
schema = datashape.dshape(schema)
if dshape and not schema and isdimension(dshape[0]):
schema = dshape.subarray(1)
if not schema and not dshape:
try:
f = open(self.path, 'r')
except:
raise ValueError('No schema detected')
dshape = discover(json.load(f))
f.close()
# Initially the array is not loaded (is this necessary?)
self._cache_arr = None
self._schema = schema
self._dshape = dshape
@property
def _arr_cache(self):
if self._cache_arr is not None:
return self._cache_arr
jsonfile = self.open(self.path)
# This will read everything in-memory (but a memmap approach
# is in the works)
self._cache_arr = nd.parse_json(str(self.dshape), jsonfile.read())
try:
jsonfile.close()
except:
pass
return self._cache_arr
def as_dynd(self):
return self._arr_cache
def remove(self):
"""Remove the persistent storage."""
os.unlink(self.path)
class JSON_Streaming(JSON):
"""
A Blaze data descriptor to expose a Streaming JSON file.
Parameters
----------
path : string
A path string for the JSON file.
schema : string or datashape
A datashape (or its string representation) of the schema
in the JSON file.
"""
immutable = False
def __init__(self, path, mode='rt', schema=None, dshape=None, open=open,
nrows_discovery=50):
self.path = path
self._abspath = os.path.abspath(path)
self.mode = mode
self.open = open
if dshape:
dshape = datashape.dshape(dshape)
if schema:
schema = datashape.dshape(schema)
if dshape and not schema and isdimension(dshape[0]):
schema = dshape.subshape[0]
if schema and not dshape:
dshape = var * schema
if not schema and not dshape:
try:
f = open(self.path, 'r')
except:
raise ValueError('No schema detected')
data = list(map(json.loads,
islice(f, 1, nrows_discovery)))
f.close()
dshape = discover(data)
schema = dshape.subshape[0]
# Initially the array is not loaded (is this necessary?)
self._cache_arr = None
self._schema = schema
self._dshape = dshape
@property
def _arr_cache(self):
if self._cache_arr is not None:
return self._cache_arr
jsonfile = self.open(self.path)
# This will read everything in-memory (but a memmap approach
# is in the works)
text = '[' + ', '.join(jsonfile) + ']'
try:
jsonfile.close()
except:
pass
self._cache_arr = nd.parse_json(str(self.dshape), text)
return self._cache_arr
def _get_py(self, key):
if isinstance(key, tuple):
result = self[key[0]]
if isinstance(key[0], (list, slice)):
return (ndget(key[1:], row) for row in result)
else:
return ndget(key[1:], result)
f = self.open(self.path)
if isinstance(key, _inttypes):
result = json.loads(nth(key, f))
elif isinstance(key, slice):
result = map(json.loads, islice(f, key.start, key.stop, key.step))
elif isinstance(key, list):
result = map(json.loads, nth_list(key, f))
else:
raise NotImplementedError('Fancy indexing not supported')
try:
if not isinstance(result, Iterator):
f.close()
except AttributeError:
pass
return result
def _iter(self):
f = self.open(self.path)
for line in f:
yield json.loads(line)
try:
f.close()
except AttributeError:
pass
__iter__ = DataDescriptor.__iter__
def as_py(self):
return tuple(self)
def _iterchunks(self, blen=100):
f = self.open(self.path)
for chunk in partition_all(blen, f):
text = '[' + ',\r\n'.join(chunk) + ']'
dshape = str(len(chunk)) + ' * ' + self.schema
yield nd.parse_json(dshape, text)
try:
f.close()
except AttributeError:
pass
@property
def appendable(self):
return any(c in self.mode for c in 'wa+')
def _extend(self, rows):
if isinstance(self.schema[0], datashape.Record):
transform = lambda row: coerce_row_to_dict(self.schema, row)
else:
transform = lambda x: x
if not self.appendable:
raise IOError("Read only access")
f = self.open(self.path, self.mode)
f.seek(0, os.SEEK_END) # go to the end of the file
for row in rows:
json.dump(transform(row), f)
f.write('\n')
try:
f.close()
except AttributeError:
pass
def _chunks(self, blen=100):
f = self.open(self.path)
for chunk in partition_all(blen, f):
text = '[' + ',\r\n'.join(chunk) + ']'
dshape = str(len(chunk) * self.schema)
yield nd.parse_json(dshape, text)
try:
f.close()
except AttributeError:
pass
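# ---------------------------------------------------------------------------
# Hedged usage sketch (added for illustration; the file name and schema are
# placeholders, not taken from the Blaze test suite). JSON_Streaming expects
# one JSON document per line and a datashape schema describing each row:
#
#   dd = JSON_Streaming('events.json', mode='rt',
#                       schema='{name: string, amount: float64}')
#   for row in dd:                               # streams the file line by line
#       print(row)
#   first_chunk = next(dd._iterchunks(blen=10))  # DyND array of up to 10 rows
#
# Writing requires an appendable mode ('a', 'w' or '+'); _extend coerces
# record rows through coerce_row_to_dict before dumping them as JSON lines.
# ---------------------------------------------------------------------------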
| {
"repo_name": "vitan/blaze",
"path": "blaze/data/json.py",
"copies": "1",
"size": "6612",
"license": "bsd-3-clause",
"hash": 326317375781498750,
"line_mean": 29.1917808219,
"line_max": 78,
"alpha_frac": 0.5499092559,
"autogenerated": false,
"ratio": 4.007272727272727,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5057181983172727,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import os
import json
from itertools import islice
import datashape
from dynd import nd
from ..utils import partition_all, nth
from .. import py2help
from ..py2help import _inttypes
from .core import DataDescriptor, isdimension
from .utils import coerce
class JSON(DataDescriptor):
"""
A Blaze data descriptor to expose a JSON file.
Parameters
----------
path : string
A path string for the JSON file.
schema : string or datashape
A datashape (or its string representation) of the schema
in the JSON file.
"""
immutable = True
deferred = False
persistent = True
appendable = False
remote = False
def __init__(self, path, mode='r', schema=None, dshape=None, open=open):
self.path = path
self.mode = mode
self.open = open
if dshape:
dshape = datashape.dshape(dshape)
if dshape and not schema and isdimension(dshape[0]):
schema = dshape.subarray(1)
if isinstance(schema, py2help._strtypes):
schema = datashape.dshape(schema)
if not schema and not dshape:
# TODO: schema detection from file
raise ValueError('No schema found')
# Initially the array is not loaded (is this necessary?)
self._cache_arr = None
self._schema = schema
self._dshape = dshape
@property
def _arr_cache(self):
if self._cache_arr is not None:
return self._cache_arr
jsonfile = self.open(self.path)
# This will read everything in-memory (but a memmap approach
# is in the works)
self._cache_arr = nd.parse_json(str(self.dshape), jsonfile.read())
try:
jsonfile.close()
except:
pass
return self._cache_arr
def as_dynd(self):
return self._arr_cache
def as_py(self):
with open(self.path) as f:
result = json.load(f)
return result
def remove(self):
"""Remove the persistent storage."""
os.unlink(self.path)
class JSON_Streaming(JSON):
"""
A Blaze data descriptor to expose a Streaming JSON file.
Parameters
----------
path : string
A path string for the JSON file.
schema : string or datashape
A datashape (or its string representation) of the schema
in the JSON file.
"""
immutable = False
@property
def _arr_cache(self):
if self._cache_arr is not None:
return self._cache_arr
jsonfile = self.open(self.path)
# This will read everything in-memory (but a memmap approach
# is in the works)
text = '[' + ', '.join(jsonfile) + ']'
try:
jsonfile.close()
except:
pass
self._cache_arr = nd.parse_json(str(self.dshape), text)
return self._cache_arr
def __getitem__(self, key):
with self.open(self.path) as f:
if isinstance(key, _inttypes):
result = json.loads(nth(key, f))
elif isinstance(key, slice):
result = list(map(json.loads,
islice(f, key.start, key.stop, key.step)))
else:
raise NotImplementedError('Fancy indexing not supported\n'
'Create DyND array and use fancy indexing from there')
return coerce(self.schema, result)
def _iter(self):
with self.open(self.path) as f:
for line in f:
yield json.loads(line)
__iter__ = DataDescriptor.__iter__
def as_py(self):
return list(self)
def _iterchunks(self, blen=100):
with self.open(self.path) as f:
for chunk in partition_all(blen, f):
text = '[' + ',\r\n'.join(chunk) + ']'
dshape = str(len(chunk)) + ' * ' + self.schema
yield nd.parse_json(dshape, text)
@property
def appendable(self):
return any(c in self.mode for c in 'wa+')
def _extend(self, rows):
if not self.appendable:
raise IOError("Read only access")
with self.open(self.path, self.mode) as f:
f.seek(0, os.SEEK_END) # go to the end of the file
for row in rows:
json.dump(row, f)
f.write('\n')
def _chunks(self, blen=100):
with self.open(self.path) as f:
for chunk in partition_all(blen, f):
text = '[' + ',\r\n'.join(chunk) + ']'
dshape = str(len(chunk) * self.schema)
yield nd.parse_json(dshape, text)
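# ---------------------------------------------------------------------------
# Hedged usage sketch (illustrative only; path and schema are placeholders).
# This variant of the descriptor supports integer and slice indexing, with
# rows coerced through the declared schema:
#
#   dd = JSON_Streaming('points.json', mode='r',
#                       schema='{x: int32, y: int32}')
#   dd[0]       # -> first row, coerced to the schema
#   dd[1:4]     # -> rows 1..3
#   list(dd)    # -> every row, via the line-by-line iterator
#
# Fancy (list) indexing deliberately raises NotImplementedError here and
# points the caller at DyND instead.
# ---------------------------------------------------------------------------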
| {
"repo_name": "sethkontny/blaze",
"path": "blaze/data/json.py",
"copies": "1",
"size": "4702",
"license": "bsd-3-clause",
"hash": -5262594590482127000,
"line_mean": 28.7594936709,
"line_max": 78,
"alpha_frac": 0.5621012335,
"autogenerated": false,
"ratio": 3.9612468407750634,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0009513699727607754,
"num_lines": 158
} |
from __future__ import absolute_import, division, print_function
import os
import logging
import platform
import traceback
import numpy as np
from qtpy import QtCore, QtGui, QtWidgets, compat
from qtpy.QtCore import Qt
from glue.external.six.moves import range as xrange
from glue.core.exceptions import IncompatibleAttribute
from glue.core import Subset
from glue.core.callback_property import add_callback, ignore_callback
from glue.config import fit_plugin, viewer_tool
from glue.viewers.matplotlib.qt.toolbar import MatplotlibViewerToolbar
from glue.core.qt.mime import LAYERS_MIME_TYPE
from glue.viewers.common.qt.mouse_mode import RoiMode
from glue.utils.qt import load_ui, get_qapp
from glue.core.qt.simpleforms import build_form_item
from glue.utils.qt.widget_properties import CurrentComboProperty
from glue.app.qt.mdi_area import GlueMdiSubWindow
from glue.viewers.matplotlib.qt.widget import MplWidget
from glue.utils import nonpartial, Pointer
from glue.utils.qt import Worker, messagebox_on_error
from glue.core.subset import RoiSubsetState
from glue.core.qt import roi as qt_roi
from .profile_viewer import ProfileViewer
from glue.viewers.image.state import AggregateSlice
from glue.core.aggregate import mom1, mom2
class Extractor(object):
# Warning:
# Coordinate conversion is not well-defined if pix2world is not
# monotonic!
@staticmethod
def abcissa(data, axis):
slc = [0 for _ in data.shape]
slc[axis] = slice(None, None)
att = data.get_world_component_id(axis)
return data[att, tuple(slc)].ravel()
@staticmethod
def spectrum(data, attribute, roi, slc, zaxis):
# Find the integer index of the x and y axes, which are the axes for
# which the image is shown (the ROI is drawn along these attributes)
xaxis = slc.index('x')
yaxis = slc.index('y')
# Get the actual component IDs corresponding to these axes
xatt = data.get_pixel_component_id(xaxis)
yatt = data.get_pixel_component_id(yaxis)
# Set up a view that does not reduce the dimensionality of the array but
# extracts 1-element slices along dimensions that are not relevant.
view = []
for idim, dim in enumerate(slc):
if idim in (xaxis, yaxis, zaxis):
view.append(slice(None))
else:
view.append(slice(dim, dim + 1))
view = tuple(view)
# We now delegate to RoiSubsetState to compute the mask based on the ROI
subset_state = RoiSubsetState(xatt=xatt, yatt=yatt, roi=roi)
mask = subset_state.to_mask(data, view=view)
# We now extract the values that fall inside the ROI. Unfortunately,
# this returns a flat 1-d array, so we need to then reshape it to get
# an array with shape (n_spec, n_pix), where n_pix is the number of
# pixels inside the ROI
values = data[attribute, view]
if zaxis != 0:
values = values.swapaxes(zaxis, 0)
mask = mask.swapaxes(zaxis, 0)
values = values[mask].reshape(data.shape[zaxis], -1)
# We then average along the spatial dimension
spectrum = np.nanmean(values, axis=1)
# Get the world coordinates of the spectral axis
x = Extractor.abcissa(data, zaxis)
return x, spectrum
@staticmethod
def world2pixel(data, axis, value):
x = Extractor.abcissa(data, axis)
if x.size > 1 and (x[1] < x[0]):
x = x[::-1]
result = x.size - np.searchsorted(x, value) - 2
else:
result = np.searchsorted(x, value) - 1
return np.clip(result, 0, x.size - 1)
@staticmethod
def pixel2world(data, axis, value):
x = Extractor.abcissa(data, axis)
return x[np.clip(value, 0, x.size - 1)]
@staticmethod
def subset_spectrum(subset, attribute, slc, zaxis):
"""
Extract a spectrum from a subset.
This makes a mask of the subset in the **current slice**,
and extracts a tube of this shape over all slices along ``zaxis``.
In other words, the variation of the subset along ``zaxis`` is ignored,
and only the interaction of the subset and the slice is relevant.
:param subset: A :class:`~glue.core.subset.Subset`
:param attribute: The :class:`~glue.core.data.ComponentID` to extract
:param slc: A tuple describing the slice
:param zaxis: Which axis to integrate over
"""
data = subset.data
x = Extractor.abcissa(data, zaxis)
view = [slice(s, s + 1)
if s not in ['x', 'y'] else slice(None)
for s in slc]
mask = np.squeeze(subset.to_mask(view))
if slc.index('x') < slc.index('y'):
mask = mask.T
w = np.where(mask)
view[slc.index('x')] = w[1]
view[slc.index('y')] = w[0]
result = np.empty(x.size)
# treat each channel separately, to reduce memory storage
for i in xrange(data.shape[zaxis]):
view[zaxis] = i
val = data[attribute, view]
result[i] = np.nansum(val) / np.isfinite(val).sum()
y = result
return x, y
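# ---------------------------------------------------------------------------
# Hedged aside (added for illustration, using plain numpy rather than a glue
# Data object): Extractor.world2pixel is essentially a searchsorted lookup
# that also copes with a world axis stored in descending order. The same
# logic on a bare array:
#
#   import numpy as np
#
#   def world2pixel_sketch(x, value):
#       """x: 1-d world coordinates along the profile axis."""
#       if x.size > 1 and (x[1] < x[0]):           # descending axis
#           result = x.size - np.searchsorted(x[::-1], value) - 2
#       else:                                       # ascending axis
#           result = np.searchsorted(x, value) - 1
#       return np.clip(result, 0, x.size - 1)
#
#   x = np.array([10., 20., 30., 40.])
#   world2pixel_sketch(x, 25.)   # -> 1 (the pixel whose world value is 20.)
# ---------------------------------------------------------------------------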
class SpectrumContext(object):
"""
Base class for different interaction contexts
"""
viewer_state = Pointer('main.viewer_state')
data = Pointer('main.data')
profile_axis = Pointer('main.profile_axis')
canvas = Pointer('main.canvas')
profile = Pointer('main.profile')
def __init__(self, main):
self.main = main
self.grip = None
self.panel = None
self.widget = None
self._setup_grip()
self._setup_widget()
self._connect()
def _setup_grip(self):
""" Create a :class:`~glue.plugins.tools.spectrum_tool.profile_viewer.Grip` object
to interact with the plot. Assign to self.grip
"""
raise NotImplementedError()
def _setup_widget(self):
"""
Create a context-specific widget
"""
# this is the widget that is displayed to the right of the
# spectrum
raise NotImplementedError()
def _connect(self):
"""
Attach event handlers
"""
pass
def set_enabled(self, enabled):
self.enable() if enabled else self.disable()
def enable(self):
if self.grip is not None:
self.grip.enable()
def disable(self):
if self.grip is not None:
self.grip.disable()
def recenter(self, lim):
"""Re-center the grip to the given x axlis limit tuple"""
if self.grip is None:
return
if hasattr(self.grip, 'value'):
self.grip.value = sum(lim) / 2.
return
# Range grip
cen = sum(lim) / 2
wid = max(lim) - min(lim)
self.grip.range = cen - wid / 4, cen + wid / 4
class NavContext(SpectrumContext):
"""
Mode to set the 2D slice in the parent image widget by dragging
a handle in the spectrum
"""
def _setup_grip(self):
def _set_state_from_grip(value):
"""Update state.slices given grip value"""
if not self.main.enabled:
return
slc = list(self.viewer_state.slices)
# state.slices stored in pixel coords
value = Extractor.world2pixel(
self.data,
self.profile_axis, value)
slc[self.profile_axis] = value
# prevent callback bouncing. Fixes #298
self.viewer_state.slices = tuple(slc)
def _set_grip_from_state(slc):
"""Update grip.value given state.slices"""
if not self.main.enabled:
return
# grip.value is stored in world coordinates
val = slc[self.profile_axis]
if isinstance(val, AggregateSlice):
val = val.center
val = Extractor.pixel2world(self.data, self.profile_axis, val)
# If pix2world not monotonic, this can trigger infinite recursion.
# Avoid by disabling callback loop
# XXX better to specifically ignore _set_state_from_grip
with ignore_callback(self.grip, 'value'):
self.grip.value = val
self.grip = self.main.profile.new_value_grip()
add_callback(self.viewer_state, 'slices', _set_grip_from_state)
add_callback(self.grip, 'value', _set_state_from_grip)
def _connect(self):
pass
def _setup_widget(self):
self.widget = QtWidgets.QTextEdit()
self.widget.setHtml("To <b> slide </b> through the cube, "
"drag the handle or double-click<br><br><br>"
"To make a <b> new profile </b>, "
"click-drag a new box in the image, or drag "
"a subset onto the plot to the left")
self.widget.setTextInteractionFlags(Qt.NoTextInteraction)
class CollapseContext(SpectrumContext):
"""
Mode to collapse a section of a cube into a 2D image.
Supports several aggregations: mean, median, max, mom1, mom2
"""
def _setup_grip(self):
self.grip = self.main.profile.new_range_grip()
def _setup_widget(self):
w = QtWidgets.QWidget()
l = QtWidgets.QFormLayout()
w.setLayout(l)
combo = QtWidgets.QComboBox()
combo.addItem("Mean", userData=np.mean)
combo.addItem("Median", userData=np.median)
combo.addItem("Max", userData=np.max)
combo.addItem("Centroid", userData=mom1)
combo.addItem("Linewidth", userData=mom2)
run = QtWidgets.QPushButton("Collapse")
save = QtWidgets.QPushButton("Save as FITS file")
buttons = QtWidgets.QHBoxLayout()
buttons.addWidget(run)
buttons.addWidget(save)
self._save = save
self._run = run
l.addRow("", combo)
l.addRow("", buttons)
self.widget = w
self._combo = combo
self._collapsed_viewer = None
def _connect(self):
self._run.clicked.connect(nonpartial(self._aggregate))
self._save.clicked.connect(nonpartial(self._choose_save))
@property
def aggregator(self):
return self._combo.itemData(self._combo.currentIndex())
@property
def aggregator_label(self):
return self._combo.currentText()
def _aggregate(self):
func = self.aggregator
rng = list(self.grip.range)
rng = Extractor.world2pixel(self.data,
self.profile_axis,
rng)
rng[1] += 1
slices = list(self.viewer_state.slices)
current_slice = slices[self.profile_axis]
if isinstance(current_slice, AggregateSlice):
current_slice = current_slice.center
slices[self.profile_axis] = AggregateSlice(slice(*rng),
current_slice,
func)
self.viewer_state.slices = tuple(slices)
# Save a local copy of the collapsed array
for layer_state in self.viewer_state.layers:
if layer_state.layer is self.viewer_state.reference_data:
break
else:
raise Exception("Couldn't find layer corresponding to reference data")
self._agg = layer_state.get_sliced_data()
@messagebox_on_error("Failed to export projection")
def _choose_save(self):
self._aggregate()
out, _ = compat.getsavefilename(filters='FITS Files (*.fits)')
if not out:
return
self.save_to(out)
def save_to(self, pth):
"""
Write the projection to a file
Parameters
----------
pth : str
Path to write to
"""
from astropy.io import fits
data = self.viewer_state.reference_data
if data is None:
raise RuntimeError("Cannot save projection -- no data to visualize")
self._aggregate()
# try to project wcs to 2D
wcs = getattr(data.coords, 'wcs', None)
if wcs:
try:
wcs.dropaxis(data.ndim - 1 - self.main.profile_axis)
header = wcs.to_header(True)
except Exception as e:
msg = "Could not extract 2D wcs for this data: %s" % e
logging.getLogger(__name__).warn(msg)
header = fits.Header()
else:
header = fits.Header()
lo, hi = self.grip.range
history = ('Created by Glue. %s projection over channels %i-%i of axis %i. Slice=%s' %
(self.aggregator_label, lo, hi, self.main.profile_axis, self.viewer_state.slices))
header.add_history(history)
try:
fits.writeto(pth, self._agg, header, overwrite=True)
except TypeError:
fits.writeto(pth, self._agg, header, clobber=True)
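# Hedged aside: the try/except around fits.writeto above is a compatibility
# shim -- newer astropy releases take overwrite=, older ones only knew
# clobber=. A standalone version of the same idiom:
#
#   from astropy.io import fits
#
#   def write_fits_compat(path, data, header=None):
#       try:
#           fits.writeto(path, data, header, overwrite=True)
#       except TypeError:   # older astropy fallback
#           fits.writeto(path, data, header, clobber=True)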
class ConstraintsWidget(QtWidgets.QWidget):
"""
A widget to display and tweak the constraints of a :class:`~glue.core.fitters.BaseFitter1D`
"""
def __init__(self, constraints, parent=None):
"""
Parameters
----------
constraints : dict
The `constraints` property of a :class:`~glue.core.fitters.BaseFitter1D`
object
parent : QtWidgets.QWidget (optional)
The parent of this widget
"""
super(ConstraintsWidget, self).__init__(parent)
self.constraints = constraints
self.layout = QtWidgets.QGridLayout()
self.layout.setContentsMargins(2, 2, 2, 2)
self.layout.setSpacing(4)
self.setLayout(self.layout)
self.layout.addWidget(QtWidgets.QLabel("Estimate"), 0, 1)
self.layout.addWidget(QtWidgets.QLabel("Fixed"), 0, 2)
self.layout.addWidget(QtWidgets.QLabel("Bounded"), 0, 3)
self.layout.addWidget(QtWidgets.QLabel("Lower Bound"), 0, 4)
self.layout.addWidget(QtWidgets.QLabel("Upper Bound"), 0, 5)
self._widgets = {}
names = sorted(list(self.constraints.keys()))
for k in names:
row = []
w = QtWidgets.QLabel(k)
row.append(w)
v = QtGui.QDoubleValidator()
e = QtWidgets.QLineEdit()
e.setValidator(v)
e.setText(str(constraints[k]['value'] or ''))
row.append(e)
w = QtWidgets.QCheckBox()
w.setChecked(constraints[k]['fixed'])
fix = w
row.append(w)
w = QtWidgets.QCheckBox()
limits = constraints[k]['limits']
w.setChecked(limits is not None)
bound = w
row.append(w)
e = QtWidgets.QLineEdit()
e.setValidator(v)
if limits is not None:
e.setText(str(limits[0]))
row.append(e)
e = QtWidgets.QLineEdit()
e.setValidator(v)
if limits is not None:
e.setText(str(limits[1]))
row.append(e)
def unset(w):
def result(active):
if active:
w.setChecked(False)
return result
fix.toggled.connect(unset(bound))
bound.toggled.connect(unset(fix))
self._widgets[k] = row
for i, row in enumerate(names, 1):
for j, widget in enumerate(self._widgets[row]):
self.layout.addWidget(widget, i, j)
def settings(self, name):
""" Return the constraints for a single model parameter """
row = self._widgets[name]
name, value, fixed, limited, lo, hi = row
value = float(value.text()) if value.text() else None
fixed = fixed.isChecked()
limited = limited.isChecked()
lo = lo.text()
hi = hi.text()
limited = limited and not ((not lo) or (not hi))
limits = None if not limited else [float(lo), float(hi)]
return dict(value=value, fixed=fixed, limits=limits)
def update_constraints(self, fitter):
""" Update the constraints in a :class:`~glue.core.fitters.BaseFitter1D`
based on the settings in this widget
"""
for name in self._widgets:
s = self.settings(name)
fitter.set_constraint(name, **s)
class FitSettingsWidget(QtWidgets.QDialog):
def __init__(self, fitter, parent=None):
super(FitSettingsWidget, self).__init__(parent)
self.fitter = fitter
self._build_form()
self._connect()
self.setModal(True)
def _build_form(self):
fitter = self.fitter
l = QtWidgets.QFormLayout()
options = fitter.options
self.widgets = {}
self.forms = {}
for k in sorted(options):
item = build_form_item(fitter, k)
l.addRow(item.label, item.widget)
self.widgets[k] = item.widget
self.forms[k] = item # need to prevent garbage collection
constraints = fitter.constraints
if constraints:
self.constraints = ConstraintsWidget(constraints)
l.addRow(self.constraints)
else:
self.constraints = None
self.okcancel = QtWidgets.QDialogButtonBox(QtWidgets.QDialogButtonBox.Ok |
QtWidgets.QDialogButtonBox.Cancel)
l.addRow(self.okcancel)
self.setLayout(l)
def _connect(self):
self.okcancel.accepted.connect(self.accept)
self.okcancel.rejected.connect(self.reject)
self.accepted.connect(self.update_fitter_from_settings)
def update_fitter_from_settings(self):
for k, v in self.widgets.items():
setattr(self.fitter, k, v.value())
if self.constraints is not None:
self.constraints.update_constraints(self.fitter)
class FitContext(SpectrumContext):
"""
Mode to fit a range of a spectrum with a model fitter.
Fitters are taken from user-defined fit plugins, or
:class:`~glue.core.fitters.BaseFitter1D` subclasses
"""
error = CurrentComboProperty('ui.uncertainty_combo')
fitter = CurrentComboProperty('ui.profile_combo')
def _setup_grip(self):
self.grip = self.main.profile.new_range_grip()
def _setup_widget(self):
self.ui = load_ui('spectrum_fit_panel.ui', None,
directory=os.path.dirname(__file__))
self.ui.uncertainty_combo.hide()
self.ui.uncertainty_label.hide()
font = QtGui.QFont("Courier")
font.setStyleHint(font.Monospace)
self.ui.results_box.document().setDefaultFont(font)
self.ui.results_box.setLineWrapMode(self.ui.results_box.NoWrap)
self.widget = self.ui
for fitter in list(fit_plugin):
self.ui.profile_combo.addItem(fitter.label,
userData=fitter())
def _edit_model_options(self):
d = FitSettingsWidget(self.fitter)
d.exec_()
def _connect(self):
self.ui.fit_button.clicked.connect(nonpartial(self.fit))
self.ui.clear_button.clicked.connect(nonpartial(self.clear))
self.ui.settings_button.clicked.connect(
nonpartial(self._edit_model_options))
def fit(self):
"""
Fit a model to the data
The fitting happens on a dedicated thread, to keep the UI
responsive
"""
xlim = self.grip.range
fitter = self.fitter
def on_success(result):
fit_result, _, _, _ = result
self._report_fit(fitter.summarize(*result))
self.main.profile.plot_fit(fitter, fit_result)
def on_fail(exc_info):
exc = '\n'.join(traceback.format_exception(*exc_info))
self._report_fit("Error during fitting:\n%s" % exc)
def on_done():
self.ui.fit_button.setText("Fit")
self.ui.fit_button.setEnabled(True)
self.canvas.draw()
self.ui.fit_button.setText("Running...")
self.ui.fit_button.setEnabled(False)
w = Worker(self.main.profile.fit, fitter, xlim=xlim)
w.result.connect(on_success)
w.error.connect(on_fail)
w.finished.connect(on_done)
self._fit_worker = w # hold onto a reference
w.start()
def _report_fit(self, report):
self.ui.results_box.document().setPlainText(report)
def clear(self):
self.ui.results_box.document().setPlainText('')
self.main.profile.clear_fit()
self.canvas.draw()
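# ---------------------------------------------------------------------------
# Hedged aside (illustration only): FitContext.fit() above shows the pattern
# this module uses to keep the UI responsive -- wrap the slow call in glue's
# Worker, connect result/error/finished, and keep a reference so the thread
# is not garbage collected. Schematically (the Worker signature is inferred
# from the calls above, not from glue's documentation):
#
#   def run_in_background(owner, slow_callable, *args, **kwargs):
#       from glue.utils.qt import Worker
#       w = Worker(slow_callable, *args, **kwargs)
#       w.result.connect(lambda result: print('done:', result))
#       w.error.connect(lambda exc_info: print('failed:', exc_info))
#       w.finished.connect(lambda: print('thread finished'))
#       owner._worker = w      # hold a reference, as FitContext does
#       w.start()
#       return w
# ---------------------------------------------------------------------------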
class SpectrumMainWindow(QtWidgets.QMainWindow):
"""
The main window that the spectrum viewer is embedded in.
Defines two signals to trigger when a subset is dropped into the window,
and when the window is closed.
"""
subset_dropped = QtCore.Signal(object)
window_closed = QtCore.Signal()
def __init__(self, parent=None):
super(SpectrumMainWindow, self).__init__(parent=parent)
self.setAcceptDrops(True)
def closeEvent(self, event):
self.window_closed.emit()
return super(SpectrumMainWindow, self).closeEvent(event)
def dragEnterEvent(self, event):
if event.mimeData().hasFormat(LAYERS_MIME_TYPE):
event.accept()
else:
event.ignore()
def dropEvent(self, event):
layer = event.mimeData().data(LAYERS_MIME_TYPE)[0]
if isinstance(layer, Subset):
self.subset_dropped.emit(layer)
def set_status(self, message):
sb = self.statusBar()
sb.showMessage(message)
@viewer_tool
class SpectrumExtractorMode(RoiMode):
"""
Lets the user select a region in an image and, when connected to a
SpectrumExtractorTool, uses this to display spectra extracted from that
position
"""
persistent = True
icon = 'glue_spectrum'
tool_id = 'spectrum'
action_text = 'Spectrum'
tool_tip = 'Extract a spectrum from the selection'
shortcut = 'S'
def __init__(self, viewer, **kwargs):
super(SpectrumExtractorMode, self).__init__(viewer, **kwargs)
self._roi_tool = qt_roi.QtRectangularROI(self._axes) # default
self._tool = SpectrumTool(self.viewer, self)
self._release_callback = self._tool._update_profile
self._move_callback = self._tool._move_profile
self._roi_callback = None
self.viewer.state.add_callback('reference_data', self._on_reference_data_change)
def _on_reference_data_change(self, reference_data):
if reference_data is not None:
self.enabled = reference_data.ndim == 3
def menu_actions(self):
result = []
a = QtWidgets.QAction('Rectangle', None)
a.triggered.connect(nonpartial(self.set_roi_tool, 'Rectangle'))
result.append(a)
a = QtWidgets.QAction('Circle', None)
a.triggered.connect(nonpartial(self.set_roi_tool, 'Circle'))
result.append(a)
a = QtWidgets.QAction('Polygon', None)
a.triggered.connect(nonpartial(self.set_roi_tool, 'Polygon'))
result.append(a)
for r in result:
if self._move_callback is not None:
r.triggered.connect(nonpartial(self._move_callback, self))
return result
def set_roi_tool(self, mode):
if mode == 'Rectangle':
self._roi_tool = qt_roi.QtRectangularROI(self._axes)
elif mode == 'Circle':
self._roi_tool = qt_roi.QtCircularROI(self._axes)
elif mode == 'Polygon':
self._roi_tool = qt_roi.QtPolygonalROI(self._axes)
self._roi_tool.plot_opts.update(edgecolor='#c51b7d',
facecolor=None,
edgewidth=3,
alpha=1.0)
def close(self):
self._tool.close()
return super(SpectrumExtractorMode, self).close()
# TODO: refactor this so that we don't have a separate tool and mode
class SpectrumTool(object):
"""
Main widget for interacting with spectra extracted from an image.
Provides different contexts for interacting with the spectrum:
*navigation context* lets the user set the slice in the parent image
by dragging a bar on the spectrum
*fit context* lets the user fit models to a portion of the spectrum
*collapse context* lets the user collapse a section of a cube to a 2D image
"""
def __init__(self, image_viewer, mouse_mode):
self._relim_requested = True
self.image_viewer = image_viewer
self.viewer_state = self.image_viewer.state
self.image_viewer.window_closed.connect(self.close)
self._build_main_widget()
self.profile = ProfileViewer(self.canvas.fig)
self.axes = self.profile.axes
self.mouse_mode = mouse_mode
self._setup_toolbar()
self._setup_ctxbar()
self._connect()
w = self.image_viewer.session.application.add_widget(self,
label='Profile')
w.close()
def close(self):
if hasattr(self, '_mdi_wrapper'):
self._mdi_wrapper.close()
else:
self.widget.close()
self.image_viewer = None
@property
def enabled(self):
"""Return whether the window is visible and active"""
return self.widget.isVisible()
def mdi_wrap(self):
sub = GlueMdiSubWindow()
sub.setWidget(self.widget)
self.widget.destroyed.connect(sub.close)
sub.resize(self.widget.size())
self._mdi_wrapper = sub
return sub
def _build_main_widget(self):
self.widget = SpectrumMainWindow()
self.widget.window_closed.connect(self.reset)
w = QtWidgets.QWidget()
l = QtWidgets.QHBoxLayout()
l.setSpacing(2)
l.setContentsMargins(2, 2, 2, 2)
w.setLayout(l)
mpl = MplWidget()
self.canvas = mpl.canvas
l.addWidget(mpl)
l.setStretchFactor(mpl, 5)
self.widget.setCentralWidget(w)
# TODO: fix hacks
w.canvas = self.canvas
self.widget.central_widget = w
def _setup_ctxbar(self):
l = self.widget.centralWidget().layout()
self._contexts = [NavContext(self),
FitContext(self),
CollapseContext(self)]
tabs = QtWidgets.QTabWidget(parent=self.widget)
# The following is needed because of a bug in Qt which means that
# tab titles don't get scaled right.
if platform.system() == 'Darwin':
app = get_qapp()
app_font = app.font()
tabs.setStyleSheet('font-size: {0}px'.format(app_font.pointSize()))
tabs.addTab(self._contexts[0].widget, 'Navigate')
tabs.addTab(self._contexts[1].widget, 'Fit')
tabs.addTab(self._contexts[2].widget, 'Collapse')
self._tabs = tabs
self._tabs.setVisible(False)
l.addWidget(tabs)
l.setStretchFactor(tabs, 0)
def _connect(self):
add_callback(self.viewer_state, 'x_att',
self.reset)
add_callback(self.viewer_state, 'y_att',
self.reset)
def _on_tab_change(index):
for i, ctx in enumerate(self._contexts):
ctx.set_enabled(i == index)
if i == index:
self.profile.active_grip = ctx.grip
self._tabs.currentChanged.connect(_on_tab_change)
_on_tab_change(self._tabs.currentIndex())
self.widget.subset_dropped.connect(self._extract_subset_profile)
def _setup_toolbar(self):
tb = MatplotlibViewerToolbar(self.widget)
# disable ProfileViewer mouse processing during mouse modes
tb.tool_activated.connect(self.profile.disconnect)
tb.tool_deactivated.connect(self.profile.connect)
self._menu_toggle_action = QtWidgets.QAction("Options", tb)
self._menu_toggle_action.setCheckable(True)
self._menu_toggle_action.toggled.connect(self._toggle_menu)
tb.addAction(self._menu_toggle_action)
self.widget.addToolBar(tb)
return tb
def _toggle_menu(self, active):
self._tabs.setVisible(active)
def reset(self, *args):
self.hide()
self.mouse_mode.clear()
self._relim_requested = True
@property
def data(self):
return self.viewer_state.reference_data
@property
def profile_axis(self):
# XXX make this settable
# defaults to the non-xy axis with the most channels
try:
slc = self.viewer_state.wcsaxes_slice[::-1]
except AttributeError:
return None
candidates = [i for i, s in enumerate(slc) if s not in ['x', 'y']]
return max(candidates, key=lambda i: self.data.shape[i])
def _recenter_grips(self):
for ctx in self._contexts:
ctx.recenter(self.axes.get_xlim())
def _extract_subset_profile(self, subset):
slc = self.viewer_state.slices
try:
x, y = Extractor.subset_spectrum(subset,
self.viewer_state.display_attribute,
slc,
self.profile_axis)
except IncompatibleAttribute:
return
self._set_profile(x, y)
def _update_from_roi(self, roi):
data = self.data
att = self.viewer_state.layers[0].attribute
slc = self.viewer_state.wcsaxes_slice[::-1]
if data is None or att is None:
return
zax = self.profile_axis
x, y = Extractor.spectrum(data, att, roi, slc, zax)
self._set_profile(x, y)
def _update_profile(self, *args):
roi = self.mouse_mode.roi()
return self._update_from_roi(roi)
def _move_profile(self, *args):
if self.mouse_mode._roi_tool._scrubbing:
self._update_profile(*args)
def _set_profile(self, x, y):
data = self.data
xid = data.get_world_component_id(self.profile_axis)
units = data.get_component(xid).units
xlabel = str(xid) if units is None else '%s [%s]' % (xid, units)
xlim = self.axes.get_xlim()
self.profile.set_xlabel(xlabel)
self.profile.set_profile(x, y, color='k')
# relim x range if requested
if self._relim_requested:
self._relim_requested = False
self.axes.set_xlim(np.nanmin(x), np.nanmax(x))
# relim y range to data within the view window
self.profile.autoscale_ylim()
if self.axes.get_xlim() != xlim:
self._recenter_grips()
self.axes.figure.canvas.draw()
self.show()
def _move_below_image_viewer(self):
rect = self.image_viewer.frameGeometry()
pos = rect.bottomLeft()
self._mdi_wrapper.setGeometry(pos.x(), pos.y(),
rect.width(), 300)
def show(self):
if self.widget.isVisible():
return
self._move_below_image_viewer()
self.widget.show()
def hide(self):
if hasattr(self, '_mdi_wrapper'):
self._mdi_wrapper.close()
else:
self.widget.close()
def _get_modes(self, axes):
return [self.mouse_mode]
| {
"repo_name": "stscieisenhamer/glue",
"path": "glue/plugins/tools/spectrum_tool/qt/spectrum_tool.py",
"copies": "1",
"size": "31628",
"license": "bsd-3-clause",
"hash": -2655220350510963000,
"line_mean": 30.6913827655,
"line_max": 101,
"alpha_frac": 0.5857784242,
"autogenerated": false,
"ratio": 3.938239322624829,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5024017746824829,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import os
import logging
import traceback
import numpy as np
from qtpy import QtCore, QtGui, QtWidgets, compat
from qtpy.QtCore import Qt
from glue.external.six.moves import range as xrange
from glue.core.aggregate import Aggregate
from glue.core.exceptions import IncompatibleAttribute
from glue.core import Subset
from glue.core.callback_property import add_callback, ignore_callback
from glue.config import fit_plugin, viewer_tool
from glue.viewers.common.qt.mpl_toolbar import MatplotlibViewerToolbar
from glue.core.qt.mime import LAYERS_MIME_TYPE
from glue.viewers.common.qt.mouse_mode import RoiMode
from glue.utils.qt import load_ui
from glue.core.qt.simpleforms import build_form_item
from glue.utils.qt.widget_properties import CurrentComboProperty
from glue.app.qt.mdi_area import GlueMdiSubWindow
from glue.viewers.common.qt.mpl_widget import MplWidget
from glue.utils import nonpartial, Pointer
from glue.utils.qt import Worker, messagebox_on_error
from glue.core import roi as core_roi
from glue.core.subset import RoiSubsetState
from glue.core.qt import roi as qt_roi
from .profile_viewer import ProfileViewer
class Extractor(object):
# Warning:
# Coordinate conversion is not well-defined if pix2world is not
# monotonic!
@staticmethod
def abcissa(data, axis):
slc = [0 for _ in data.shape]
slc[axis] = slice(None, None)
att = data.get_world_component_id(axis)
return data[att, tuple(slc)].ravel()
@staticmethod
def spectrum(data, attribute, roi, slc, zaxis):
# Find the integer index of the x and y axes, which are the axes for
# which the image is shown (the ROI is drawn along these attributes)
xaxis = slc.index('x')
yaxis = slc.index('y')
# Get the actual component IDs corresponding to these axes
xatt = data.get_pixel_component_id(xaxis)
yatt = data.get_pixel_component_id(yaxis)
# Set up a view that does not reduce the dimensionality of the array but
# extracts 1-element slices along dimensions that are not relevant.
view = []
for idim, dim in enumerate(slc):
if idim in (xaxis, yaxis, zaxis):
view.append(slice(None))
else:
view.append(slice(dim, dim + 1))
view = tuple(view)
# We now delegate to RoiSubsetState to compute the mask based on the ROI
subset_state = RoiSubsetState(xatt=xatt, yatt=yatt, roi=roi)
mask = subset_state.to_mask(data, view=view)
# We now extract the values that fall inside the ROI. Unfortunately,
# this returns a flat 1-d array, so we need to then reshape it to get
# an array with shape (n_spec, n_pix), where n_pix is the number of
# pixels inside the ROI
values = data[attribute, view]
if zaxis != 0:
values = values.swapaxes(zaxis, 0)
mask = mask.swapaxes(zaxis, 0)
values = values[mask].reshape(data.shape[zaxis], -1)
# We then average along the spatial dimension
spectrum = np.nanmean(values, axis=1)
# Get the world coordinates of the spectral axis
x = Extractor.abcissa(data, zaxis)
return x, spectrum
@staticmethod
def world2pixel(data, axis, value):
x = Extractor.abcissa(data, axis)
if x.size > 1 and (x[1] < x[0]):
x = x[::-1]
result = x.size - np.searchsorted(x, value) - 2
else:
result = np.searchsorted(x, value) - 1
return np.clip(result, 0, x.size - 1)
@staticmethod
def pixel2world(data, axis, value):
x = Extractor.abcissa(data, axis)
return x[np.clip(value, 0, x.size - 1)]
@staticmethod
def subset_spectrum(subset, attribute, slc, zaxis):
"""
Extract a spectrum from a subset.
This makes a mask of the subset in the **current slice**,
and extracts a tube of this shape over all slices along ``zaxis``.
In other words, the variation of the subset along ``zaxis`` is ignored,
and only the interaction of the subset and the slice is relevant.
:param subset: A :class:`~glue.core.subset.Subset`
:param attribute: The :class:`~glue.core.data.ComponentID` to extract
:param slc: A tuple describing the slice
:param zaxis: Which axis to integrate over
"""
data = subset.data
x = Extractor.abcissa(data, zaxis)
view = [slice(s, s + 1)
if s not in ['x', 'y'] else slice(None)
for s in slc]
mask = np.squeeze(subset.to_mask(view))
if slc.index('x') < slc.index('y'):
mask = mask.T
w = np.where(mask)
view[slc.index('x')] = w[1]
view[slc.index('y')] = w[0]
result = np.empty(x.size)
# treat each channel separately, to reduce memory storage
for i in xrange(data.shape[zaxis]):
view[zaxis] = i
val = data[attribute, view]
result[i] = np.nansum(val) / np.isfinite(val).sum()
y = result
return x, y
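    # Illustrative example (hypothetical slice): for slc == (0, 'y', 'x') and
    # zaxis == 0 the initial view is [slice(0, 1), slice(None), slice(None)];
    # the x/y entries are then replaced by the integer pixel coordinates of the
    # subset mask, and view[zaxis] is set to each channel index in turn.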
class SpectrumContext(object):
"""
Base class for different interaction contexts
"""
client = Pointer('main.client')
data = Pointer('main.data')
profile_axis = Pointer('main.profile_axis')
canvas = Pointer('main.canvas')
profile = Pointer('main.profile')
def __init__(self, main):
self.main = main
self.grip = None
self.panel = None
self.widget = None
self._setup_grip()
self._setup_widget()
self._connect()
def _setup_grip(self):
""" Create a :class:`~glue.plugins.tools.spectrum_tool.profile_viewer.Grip` object
to interact with the plot. Assign to self.grip
"""
raise NotImplementedError()
def _setup_widget(self):
"""
Create a context-specific widget
"""
# this is the widget that is displayed to the right of the
# spectrum
raise NotImplementedError()
def _connect(self):
"""
Attach event handlers
"""
pass
def set_enabled(self, enabled):
self.enable() if enabled else self.disable()
def enable(self):
if self.grip is not None:
self.grip.enable()
def disable(self):
if self.grip is not None:
self.grip.disable()
def recenter(self, lim):
"""Re-center the grip to the given x axlis limit tuple"""
if self.grip is None:
return
if hasattr(self.grip, 'value'):
self.grip.value = sum(lim) / 2.
return
# Range grip
cen = sum(lim) / 2
wid = max(lim) - min(lim)
self.grip.range = cen - wid / 4, cen + wid / 4
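    # Illustrative example (hypothetical limits): with lim == (0, 10) a value
    # grip is re-centred at 5.0, while a range grip is set to the middle half
    # of the view, i.e. range == (2.5, 7.5).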
class NavContext(SpectrumContext):
"""
Mode to set the 2D slice in the parent image widget by dragging
a handle in the spectrum
"""
def _setup_grip(self):
def _set_client_from_grip(value):
"""Update client.slice given grip value"""
if not self.main.enabled:
return
slc = list(self.client.slice)
# client.slice stored in pixel coords
value = Extractor.world2pixel(
self.data,
self.profile_axis, value)
slc[self.profile_axis] = value
# prevent callback bouncing. Fixes #298
with ignore_callback(self.grip, 'value'):
self.client.slice = tuple(slc)
def _set_grip_from_client(slc):
"""Update grip.value given client.slice"""
if not self.main.enabled:
return
# grip.value is stored in world coordinates
val = slc[self.profile_axis]
val = Extractor.pixel2world(self.data, self.profile_axis, val)
# If pix2world not monotonic, this can trigger infinite recursion.
# Avoid by disabling callback loop
# XXX better to specifically ignore _set_client_from_grip
with ignore_callback(self.client, 'slice'):
self.grip.value = val
self.grip = self.main.profile.new_value_grip()
add_callback(self.client, 'slice', _set_grip_from_client)
add_callback(self.grip, 'value', _set_client_from_grip)
def _connect(self):
pass
def _setup_widget(self):
self.widget = QtWidgets.QTextEdit()
self.widget.setHtml("To <b> slide </b> through the cube, "
"drag the handle or double-click<br><br><br>"
"To make a <b> new profile </b>, "
"click-drag a new box in the image, or drag "
"a subset onto the plot to the left")
self.widget.setTextInteractionFlags(Qt.NoTextInteraction)
class CollapseContext(SpectrumContext):
"""
Mode to collapse a section of a cube into a 2D image.
Supports several aggregations: mean, median, max, mom1, mom2
"""
def _setup_grip(self):
self.grip = self.main.profile.new_range_grip()
def _setup_widget(self):
w = QtWidgets.QWidget()
l = QtWidgets.QFormLayout()
w.setLayout(l)
combo = QtWidgets.QComboBox()
combo.addItem("Mean", userData=Aggregate.mean)
combo.addItem("Median", userData=Aggregate.median)
combo.addItem("Max", userData=Aggregate.max)
combo.addItem("Centroid", userData=Aggregate.mom1)
combo.addItem("Linewidth", userData=Aggregate.mom2)
run = QtWidgets.QPushButton("Collapse")
save = QtWidgets.QPushButton("Save as FITS file")
buttons = QtWidgets.QHBoxLayout()
buttons.addWidget(run)
buttons.addWidget(save)
self._save = save
self._run = run
l.addRow("", combo)
l.addRow("", buttons)
self.widget = w
self._combo = combo
self._agg = None
def _connect(self):
self._run.clicked.connect(nonpartial(self._aggregate))
self._save.clicked.connect(nonpartial(self._choose_save))
@property
def aggregator(self):
return self._combo.itemData(self._combo.currentIndex())
@property
def aggregator_label(self):
return self._combo.currentText()
def _aggregate(self):
func = self.aggregator
rng = list(self.grip.range)
rng[1] += 1
rng = Extractor.world2pixel(self.data,
self.profile_axis,
rng)
agg = Aggregate(self.data, self.client.display_attribute,
self.main.profile_axis, self.client.slice, rng)
im = func(agg)
self._agg = im
self.client.override_image(im)
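    # Illustrative example (hypothetical values): if the grip spans world
    # coordinates (4500.0, 4600.0) on the spectral axis, world2pixel converts
    # this (after bumping the upper edge by one) to a pixel channel range such
    # as [120, 146), and the chosen Aggregate method collapses those channels
    # into a single 2D image that replaces the one currently displayed.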
@messagebox_on_error("Failed to export projection")
def _choose_save(self):
out, _ = compat.getsavefilename(filters='FITS Files (*.fits)')
if out is None:
return
self.save_to(out)
def save_to(self, pth):
"""
Write the projection to a file
Parameters
----------
pth : str
Path to write to
"""
from astropy.io import fits
data = self.client.display_data
if data is None:
raise RuntimeError("Cannot save projection -- no data to visualize")
self._aggregate()
# try to project wcs to 2D
wcs = getattr(data.coords, 'wcs', None)
if wcs:
try:
wcs.dropaxis(data.ndim - 1 - self.main.profile_axis)
header = wcs.to_header(True)
except Exception as e:
msg = "Could not extract 2D wcs for this data: %s" % e
logging.getLogger(__name__).warn(msg)
header = fits.Header()
else:
header = fits.Header()
lo, hi = self.grip.range
history = ('Created by Glue. %s projection over channels %i-%i of axis %i. Slice=%s' %
(self.aggregator_label, lo, hi, self.main.profile_axis, self.client.slice))
header.add_history(history)
fits.writeto(pth, self._agg, header, clobber=True)
class ConstraintsWidget(QtWidgets.QWidget):
"""
A widget to display and tweak the constraints of a :class:`~glue.core.fitters.BaseFitter1D`
"""
def __init__(self, constraints, parent=None):
"""
Parameters
----------
constraints : dict
            The `constraints` property of a :class:`~glue.core.fitters.BaseFitter1D`
object
parent : QtWidgets.QWidget (optional)
The parent of this widget
"""
super(ConstraintsWidget, self).__init__(parent)
self.constraints = constraints
self.layout = QtWidgets.QGridLayout()
self.layout.setContentsMargins(2, 2, 2, 2)
self.layout.setSpacing(4)
self.setLayout(self.layout)
self.layout.addWidget(QtWidgets.QLabel("Estimate"), 0, 1)
self.layout.addWidget(QtWidgets.QLabel("Fixed"), 0, 2)
self.layout.addWidget(QtWidgets.QLabel("Bounded"), 0, 3)
self.layout.addWidget(QtWidgets.QLabel("Lower Bound"), 0, 4)
self.layout.addWidget(QtWidgets.QLabel("Upper Bound"), 0, 5)
self._widgets = {}
names = sorted(list(self.constraints.keys()))
for k in names:
row = []
w = QtWidgets.QLabel(k)
row.append(w)
v = QtGui.QDoubleValidator()
e = QtWidgets.QLineEdit()
e.setValidator(v)
e.setText(str(constraints[k]['value'] or ''))
row.append(e)
w = QtWidgets.QCheckBox()
w.setChecked(constraints[k]['fixed'])
fix = w
row.append(w)
w = QtWidgets.QCheckBox()
limits = constraints[k]['limits']
w.setChecked(limits is not None)
bound = w
row.append(w)
e = QtWidgets.QLineEdit()
e.setValidator(v)
if limits is not None:
e.setText(str(limits[0]))
row.append(e)
e = QtWidgets.QLineEdit()
e.setValidator(v)
if limits is not None:
e.setText(str(limits[1]))
row.append(e)
def unset(w):
def result(active):
if active:
w.setChecked(False)
return result
fix.toggled.connect(unset(bound))
bound.toggled.connect(unset(fix))
self._widgets[k] = row
for i, row in enumerate(names, 1):
for j, widget in enumerate(self._widgets[row]):
self.layout.addWidget(widget, i, j)
def settings(self, name):
""" Return the constraints for a single model parameter """
row = self._widgets[name]
name, value, fixed, limited, lo, hi = row
value = float(value.text()) if value.text() else None
fixed = fixed.isChecked()
limited = limited.isChecked()
lo = lo.text()
hi = hi.text()
limited = limited and not ((not lo) or (not hi))
limits = None if not limited else [float(lo), float(hi)]
return dict(value=value, fixed=fixed, limits=limits)
def update_constraints(self, fitter):
""" Update the constraints in a :class:`~glue.core.fitters.BaseFitter1D`
based on the settings in this widget
"""
for name in self._widgets:
s = self.settings(name)
fitter.set_constraint(name, **s)
class FitSettingsWidget(QtWidgets.QDialog):
def __init__(self, fitter, parent=None):
super(FitSettingsWidget, self).__init__(parent)
self.fitter = fitter
self._build_form()
self._connect()
self.setModal(True)
def _build_form(self):
fitter = self.fitter
l = QtWidgets.QFormLayout()
options = fitter.options
self.widgets = {}
self.forms = {}
for k in sorted(options):
item = build_form_item(fitter, k)
l.addRow(item.label, item.widget)
self.widgets[k] = item.widget
self.forms[k] = item # need to prevent garbage collection
constraints = fitter.constraints
if constraints:
self.constraints = ConstraintsWidget(constraints)
l.addRow(self.constraints)
else:
self.constraints = None
self.okcancel = QtWidgets.QDialogButtonBox(QtWidgets.QDialogButtonBox.Ok |
QtWidgets.QDialogButtonBox.Cancel)
l.addRow(self.okcancel)
self.setLayout(l)
def _connect(self):
self.okcancel.accepted.connect(self.accept)
self.okcancel.rejected.connect(self.reject)
self.accepted.connect(self.update_fitter_from_settings)
def update_fitter_from_settings(self):
for k, v in self.widgets.items():
setattr(self.fitter, k, v.value())
if self.constraints is not None:
self.constraints.update_constraints(self.fitter)
class FitContext(SpectrumContext):
"""
Mode to fit a range of a spectrum with a model fitter.
Fitters are taken from user-defined fit plugins, or
:class:`~glue.core.fitters.BaseFitter1D` subclasses
"""
error = CurrentComboProperty('ui.uncertainty_combo')
fitter = CurrentComboProperty('ui.profile_combo')
def _setup_grip(self):
self.grip = self.main.profile.new_range_grip()
def _setup_widget(self):
self.ui = load_ui('spectrum_fit_panel.ui', None,
directory=os.path.dirname(__file__))
self.ui.uncertainty_combo.hide()
self.ui.uncertainty_label.hide()
font = QtGui.QFont("Courier")
font.setStyleHint(font.Monospace)
self.ui.results_box.document().setDefaultFont(font)
self.ui.results_box.setLineWrapMode(self.ui.results_box.NoWrap)
self.widget = self.ui
for fitter in list(fit_plugin):
self.ui.profile_combo.addItem(fitter.label,
userData=fitter())
def _edit_model_options(self):
d = FitSettingsWidget(self.fitter)
d.exec_()
def _connect(self):
self.ui.fit_button.clicked.connect(nonpartial(self.fit))
self.ui.clear_button.clicked.connect(nonpartial(self.clear))
self.ui.settings_button.clicked.connect(
nonpartial(self._edit_model_options))
def fit(self):
"""
Fit a model to the data
The fitting happens on a dedicated thread, to keep the UI
responsive
"""
xlim = self.grip.range
fitter = self.fitter
def on_success(result):
fit_result, _, _, _ = result
self._report_fit(fitter.summarize(*result))
self.main.profile.plot_fit(fitter, fit_result)
def on_fail(exc_info):
exc = '\n'.join(traceback.format_exception(*exc_info))
self._report_fit("Error during fitting:\n%s" % exc)
def on_done():
self.ui.fit_button.setText("Fit")
self.ui.fit_button.setEnabled(True)
self.canvas.draw()
self.ui.fit_button.setText("Running...")
self.ui.fit_button.setEnabled(False)
w = Worker(self.main.profile.fit, fitter, xlim=xlim)
w.result.connect(on_success)
w.error.connect(on_fail)
w.finished.connect(on_done)
self._fit_worker = w # hold onto a reference
w.start()
def _report_fit(self, report):
self.ui.results_box.document().setPlainText(report)
def clear(self):
self.ui.results_box.document().setPlainText('')
self.main.profile.clear_fit()
self.canvas.draw()
class SpectrumMainWindow(QtWidgets.QMainWindow):
"""
The main window that the spectrum viewer is embedded in.
Defines two signals to trigger when a subset is dropped into the window,
and when the window is closed.
"""
subset_dropped = QtCore.Signal(object)
window_closed = QtCore.Signal()
def __init__(self, parent=None):
super(SpectrumMainWindow, self).__init__(parent=parent)
self.setAcceptDrops(True)
def closeEvent(self, event):
self.window_closed.emit()
return super(SpectrumMainWindow, self).closeEvent(event)
def dragEnterEvent(self, event):
if event.mimeData().hasFormat(LAYERS_MIME_TYPE):
event.accept()
else:
event.ignore()
def dropEvent(self, event):
layer = event.mimeData().data(LAYERS_MIME_TYPE)[0]
if isinstance(layer, Subset):
self.subset_dropped.emit(layer)
def set_status(self, message):
sb = self.statusBar()
sb.showMessage(message)
@viewer_tool
class SpectrumExtractorMode(RoiMode):
"""
Lets the user select a region in an image and, when connected to a
SpectrumExtractorTool, uses this to display spectra extracted from that
position
"""
persistent = True
icon = 'glue_spectrum'
tool_id = 'spectrum'
action_text = 'Spectrum'
tool_tip = 'Extract a spectrum from the selection'
shortcut = 'S'
def __init__(self, viewer, **kwargs):
super(SpectrumExtractorMode, self).__init__(viewer, **kwargs)
self._roi_tool = qt_roi.QtRectangularROI(self._axes) # default
self._tool = SpectrumTool(self.viewer, self)
self._release_callback = self._tool._update_profile
self._move_callback = self._tool._move_profile
self._roi_callback = None
def menu_actions(self):
result = []
a = QtWidgets.QAction('Rectangle', None)
a.triggered.connect(nonpartial(self.set_roi_tool, 'Rectangle'))
result.append(a)
a = QtWidgets.QAction('Circle', None)
a.triggered.connect(nonpartial(self.set_roi_tool, 'Circle'))
result.append(a)
a = QtWidgets.QAction('Polygon', None)
a.triggered.connect(nonpartial(self.set_roi_tool, 'Polygon'))
result.append(a)
for r in result:
if self._move_callback is not None:
r.triggered.connect(nonpartial(self._move_callback, self))
return result
def set_roi_tool(self, mode):
        if mode == 'Rectangle':
            self._roi_tool = qt_roi.QtRectangularROI(self._axes)
        if mode == 'Circle':
            self._roi_tool = qt_roi.QtCircularROI(self._axes)
        if mode == 'Polygon':
            self._roi_tool = qt_roi.QtPolygonalROI(self._axes)
self._roi_tool.plot_opts.update(edgecolor='#c51b7d',
facecolor=None,
edgewidth=3,
alpha=1.0)
def close(self):
self._tool.close()
return super(SpectrumExtractorMode, self).close()
# TODO: refactor this so that we don't have a separate tool and mode
class SpectrumTool(object):
"""
Main widget for interacting with spectra extracted from an image.
Provides different contexts for interacting with the spectrum:
*navigation context* lets the user set the slice in the parent image
by dragging a bar on the spectrum
*fit context* lets the user fit models to a portion of the spectrum
    *collapse context* lets the user collapse a section of a cube to a 2D image
"""
def __init__(self, image_widget, mouse_mode):
self._relim_requested = True
self.image_widget = image_widget
self._build_main_widget()
self.client = self.image_widget.client
self.profile = ProfileViewer(self.canvas.fig)
self.axes = self.profile.axes
self.mouse_mode = mouse_mode
self._setup_toolbar()
self._setup_ctxbar()
self._connect()
w = self.image_widget.session.application.add_widget(self,
label='Profile')
w.close()
def close(self):
if hasattr(self, '_mdi_wrapper'):
self._mdi_wrapper.close()
else:
self.widget.close()
@property
def enabled(self):
"""Return whether the window is visible and active"""
return self.widget.isVisible()
def mdi_wrap(self):
sub = GlueMdiSubWindow()
sub.setWidget(self.widget)
self.widget.destroyed.connect(sub.close)
sub.resize(self.widget.size())
self._mdi_wrapper = sub
return sub
def _build_main_widget(self):
self.widget = SpectrumMainWindow()
self.widget.window_closed.connect(self.reset)
w = QtWidgets.QWidget()
l = QtWidgets.QHBoxLayout()
l.setSpacing(2)
l.setContentsMargins(2, 2, 2, 2)
w.setLayout(l)
mpl = MplWidget()
self.canvas = mpl.canvas
l.addWidget(mpl)
l.setStretchFactor(mpl, 5)
self.widget.setCentralWidget(w)
# TODO: fix hacks
w.canvas = self.canvas
self.widget.central_widget = w
def _setup_ctxbar(self):
l = self.widget.centralWidget().layout()
self._contexts = [NavContext(self),
FitContext(self),
CollapseContext(self)]
tabs = QtWidgets.QTabWidget()
tabs.addTab(self._contexts[0].widget, 'Navigate')
tabs.addTab(self._contexts[1].widget, 'Fit')
tabs.addTab(self._contexts[2].widget, 'Collapse')
self._tabs = tabs
self._tabs.setVisible(False)
l.addWidget(tabs)
l.setStretchFactor(tabs, 0)
def _connect(self):
add_callback(self.client, 'slice',
self._check_invalidate,
echo_old=True)
def _on_tab_change(index):
for i, ctx in enumerate(self._contexts):
ctx.set_enabled(i == index)
if i == index:
self.profile.active_grip = ctx.grip
self._tabs.currentChanged.connect(_on_tab_change)
_on_tab_change(self._tabs.currentIndex())
self.widget.subset_dropped.connect(self._extract_subset_profile)
def _setup_toolbar(self):
tb = MatplotlibViewerToolbar(self.widget)
# disable ProfileViewer mouse processing during mouse modes
tb.tool_activated.connect(self.profile.disconnect)
tb.tool_deactivated.connect(self.profile.connect)
self._menu_toggle_action = QtWidgets.QAction("Options", tb)
self._menu_toggle_action.setCheckable(True)
self._menu_toggle_action.toggled.connect(self._toggle_menu)
tb.addAction(self._menu_toggle_action)
self.widget.addToolBar(tb)
return tb
def _toggle_menu(self, active):
self._tabs.setVisible(active)
def _check_invalidate(self, slc_old, slc_new):
"""
If we change the orientation of the slice,
reset and hide the profile viewer
"""
if self.profile_axis is None or not self.enabled:
return
if (slc_old.index('x') != slc_new.index('x') or
slc_old.index('y') != slc_new.index('y')):
self.reset()
def reset(self):
self.hide()
self.mouse_mode.clear()
self._relim_requested = True
@property
def data(self):
return self.client.display_data
@property
def profile_axis(self):
# XXX make this settable
# defaults to the non-xy axis with the most channels
slc = self.client.slice
candidates = [i for i, s in enumerate(slc) if s not in ['x', 'y']]
return max(candidates, key=lambda i: self.data.shape[i])
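    # Illustrative example (hypothetical cube): for slice == (0, 'y', 'x') and
    # data.shape == (30, 128, 128) the only non-xy axis is 0, so the profile
    # is taken along the 30-channel axis.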
def _recenter_grips(self):
for ctx in self._contexts:
ctx.recenter(self.axes.get_xlim())
def _extract_subset_profile(self, subset):
slc = self.client.slice
try:
x, y = Extractor.subset_spectrum(subset,
self.client.display_attribute,
slc,
self.profile_axis)
except IncompatibleAttribute:
return
self._set_profile(x, y)
def _update_from_roi(self, roi):
data = self.data
att = self.client.display_attribute
slc = self.client.slice
if data is None or att is None:
return
zax = self.profile_axis
x, y = Extractor.spectrum(data, att, roi, slc, zax)
self._set_profile(x, y)
def _update_profile(self, *args):
roi = self.mouse_mode.roi()
return self._update_from_roi(roi)
def _move_profile(self, *args):
if self.mouse_mode._roi_tool._scrubbing:
self._update_profile(args)
def _set_profile(self, x, y):
data = self.data
xid = data.get_world_component_id(self.profile_axis)
units = data.get_component(xid).units
xlabel = str(xid) if units is None else '%s [%s]' % (xid, units)
xlim = self.axes.get_xlim()
self.profile.set_xlabel(xlabel)
self.profile.set_profile(x, y, color='k')
# relim x range if requested
if self._relim_requested:
self._relim_requested = False
self.axes.set_xlim(np.nanmin(x), np.nanmax(x))
# relim y range to data within the view window
self.profile.autoscale_ylim()
if self.axes.get_xlim() != xlim:
self._recenter_grips()
self.axes.figure.canvas.draw()
self.show()
def _move_below_image_widget(self):
rect = self.image_widget.frameGeometry()
pos = rect.bottomLeft()
self._mdi_wrapper.setGeometry(pos.x(), pos.y(),
rect.width(), 300)
def show(self):
if self.widget.isVisible():
return
self._move_below_image_widget()
self.widget.show()
def hide(self):
self.widget.close()
def _get_modes(self, axes):
return [self.mouse_mode]
def _display_data_hook(self, data):
if data is not None:
self.mouse_mode.enabled = data.ndim > 2
| {
"repo_name": "saimn/glue",
"path": "glue/plugins/tools/spectrum_tool/qt/spectrum_tool.py",
"copies": "1",
"size": "30416",
"license": "bsd-3-clause",
"hash": -4290394163628995600,
"line_mean": 30.5191709845,
"line_max": 95,
"alpha_frac": 0.5851854287,
"autogenerated": false,
"ratio": 3.9180729099574907,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.500325833865749,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import os
import math
import numpy as np
import torch
import torch.legacy.optim as legacyOptim
import torch.nn.functional as F
import torch.multiprocessing as mp
from torch.autograd import Variable
from envs import create_atari_env
from model import ES
import matplotlib.pyplot as plt
def do_rollouts(args, models, random_seeds, return_queue, env, are_negative):
"""
For each model, do a rollout. Supports multiple models per thread but
don't do it -- it's inefficient (it's mostly a relic of when I would run
both a perturbation and its antithesis on the same thread).
"""
all_returns = []
all_num_frames = []
for model in models:
if not args.small_net:
cx = Variable(torch.zeros(1, 256))
hx = Variable(torch.zeros(1, 256))
state = env.reset()
state = torch.from_numpy(state)
this_model_return = 0
this_model_num_frames = 0
# Rollout
for step in range(args.max_episode_length):
if args.small_net:
state = state.float()
state = state.view(1, env.observation_space.shape[0])
logit = model(Variable(state, volatile=True))
else:
logit, (hx, cx) = model(
(Variable(state.unsqueeze(0), volatile=True),
(hx, cx)))
prob = F.softmax(logit)
action = prob.max(1)[1].data.numpy()
state, reward, done, _ = env.step(action[0, 0])
this_model_return += reward
this_model_num_frames += 1
if done:
break
state = torch.from_numpy(state)
all_returns.append(this_model_return)
all_num_frames.append(this_model_num_frames)
return_queue.put((random_seeds, all_returns, all_num_frames, are_negative))
def perturb_model(args, model, random_seed, env):
"""
Modifies the given model with a pertubation of its parameters,
as well as the negative perturbation, and returns both perturbed
models.
"""
new_model = ES(env.observation_space.shape[0],
env.action_space, args.small_net)
anti_model = ES(env.observation_space.shape[0],
env.action_space, args.small_net)
new_model.load_state_dict(model.state_dict())
anti_model.load_state_dict(model.state_dict())
np.random.seed(random_seed)
for (k, v), (anti_k, anti_v) in zip(new_model.es_params(),
anti_model.es_params()):
eps = np.random.normal(0, 1, v.size())
v += torch.from_numpy(args.sigma*eps).float()
anti_v += torch.from_numpy(args.sigma*-eps).float()
return [new_model, anti_model]
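# Note on perturb_model (illustrative): this is mirrored (antithetic) sampling.
# A single eps ~ N(0, 1) is drawn per parameter tensor under the shared seed,
# and the two returned models hold theta + sigma * eps and theta - sigma * eps,
# so the perturbation noise cancels to first order when both are evaluated.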
optimConfig = []
averageReward = []
maxReward = []
minReward = []
episodeCounter = []
def gradient_update(args, synced_model, returns, random_seeds, neg_list,
num_eps, num_frames, chkpt_dir, unperturbed_results):
def fitness_shaping(returns):
"""
A rank transformation on the rewards, which reduces the chances
of falling into local optima early in training.
"""
sorted_returns_backwards = sorted(returns)[::-1]
lamb = len(returns)
shaped_returns = []
denom = sum([max(0, math.log(lamb/2 + 1, 2) -
math.log(sorted_returns_backwards.index(r) + 1, 2))
for r in returns])
for r in returns:
num = max(0, math.log(lamb/2 + 1, 2) -
math.log(sorted_returns_backwards.index(r) + 1, 2))
shaped_returns.append(num/denom + 1/lamb)
return shaped_returns
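    # Illustrative example (hypothetical returns, rounded): for returns of
    # [1.0, 2.0, 3.0] the shaped weights come out to roughly
    # [0.33, 0.53, 1.14] -- the best rollout dominates the update while the
    # worst collapses to the 1/len(returns) floor, which is the point of the
    # rank transformation.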
def unperturbed_rank(returns, unperturbed_results):
nth_place = 1
for r in returns:
if r > unperturbed_results:
nth_place += 1
rank_diag = ('%d out of %d (1 means gradient '
'is uninformative)' % (nth_place,
len(returns) + 1))
return rank_diag, nth_place
batch_size = len(returns)
assert batch_size == args.n
assert len(random_seeds) == batch_size
shaped_returns = fitness_shaping(returns)
rank_diag, rank = unperturbed_rank(returns, unperturbed_results)
if not args.silent:
print('Episode num: %d\n'
'Average reward: %f\n'
'Variance in rewards: %f\n'
'Max reward: %f\n'
'Min reward: %f\n'
'Batch size: %d\n'
'Max episode length: %d\n'
'Sigma: %f\n'
'Learning rate: %f\n'
'Total num frames seen: %d\n'
'Unperturbed reward: %f\n'
'Unperturbed rank: %s\n'
'Using Adam: %r\n\n' %
(num_eps, np.mean(returns), np.var(returns), max(returns),
min(returns), batch_size,
args.max_episode_length, args.sigma, args.lr, num_frames,
unperturbed_results, rank_diag, args.useAdam))
averageReward.append(np.mean(returns))
episodeCounter.append(num_eps)
maxReward.append(max(returns))
minReward.append(min(returns))
pltAvg, = plt.plot(episodeCounter, averageReward, label='average')
pltMax, = plt.plot(episodeCounter, maxReward, label='max')
pltMin, = plt.plot(episodeCounter, minReward, label='min')
plt.ylabel('rewards')
plt.xlabel('episode num')
plt.legend(handles=[pltAvg, pltMax,pltMin])
fig1 = plt.gcf()
plt.draw()
fig1.savefig('graph.png', dpi=100)
# For each model, generate the same random numbers as we did
# before, and update parameters. We apply weight decay once.
if args.useAdam:
globalGrads = None
for i in range(args.n):
np.random.seed(random_seeds[i])
multiplier = -1 if neg_list[i] else 1
reward = shaped_returns[i]
localGrads = []
idx = 0
for k, v in synced_model.es_params():
eps = np.random.normal(0, 1, v.size())
grad = torch.from_numpy((args.n*args.sigma) * (reward*multiplier*eps)).float()
localGrads.append(grad)
if len(optimConfig) == idx:
optimConfig.append({ 'learningRate' : args.lr })
idx = idx + 1
            if globalGrads is None:
                globalGrads = localGrads
            else:
                for j in range(len(globalGrads)):
                    globalGrads[j] = torch.add(globalGrads[j], localGrads[j])
idx = 0
for k, v in synced_model.es_params():
r, _ = legacyOptim.adam( lambda x: (1, -globalGrads[idx]), v , optimConfig[idx])
v.copy_(r)
idx = idx + 1
else:
# For each model, generate the same random numbers as we did
# before, and update parameters. We apply weight decay once.
for i in range(args.n):
np.random.seed(random_seeds[i])
multiplier = -1 if neg_list[i] else 1
reward = shaped_returns[i]
for k, v in synced_model.es_params():
eps = np.random.normal(0, 1, v.size())
v += torch.from_numpy(args.lr/(args.n*args.sigma) *
(reward*multiplier*eps)).float()
args.lr *= args.lr_decay
torch.save(synced_model.state_dict(),
os.path.join(chkpt_dir, 'latest.pth'))
return synced_model
def render_env(args, model, env):
while True:
state = env.reset()
state = torch.from_numpy(state)
this_model_return = 0
if not args.small_net:
cx = Variable(torch.zeros(1, 256))
hx = Variable(torch.zeros(1, 256))
done = False
while not done:
if args.small_net:
state = state.float()
state = state.view(1, env.observation_space.shape[0])
logit = model(Variable(state, volatile=True))
else:
logit, (hx, cx) = model(
(Variable(state.unsqueeze(0), volatile=True),
(hx, cx)))
prob = F.softmax(logit)
action = prob.max(1)[1].data.numpy()
state, reward, done, _ = env.step(action[0, 0])
env.render()
this_model_return += reward
state = torch.from_numpy(state)
print('Reward: %f' % this_model_return)
def generate_seeds_and_models(args, synced_model, env):
"""
Returns a seed and 2 perturbed models
"""
np.random.seed()
random_seed = np.random.randint(2**30)
two_models = perturb_model(args, synced_model, random_seed, env)
return random_seed, two_models
def train_loop(args, synced_model, env, chkpt_dir):
def flatten(raw_results, index):
notflat_results = [result[index] for result in raw_results]
return [item for sublist in notflat_results for item in sublist]
print("Num params in network %d" % synced_model.count_parameters())
num_eps = 0
total_num_frames = 0
for _ in range(args.max_gradient_updates):
processes = []
return_queue = mp.Queue()
all_seeds, all_models = [], []
# Generate a perturbation and its antithesis
for j in range(int(args.n/2)):
random_seed, two_models = generate_seeds_and_models(args,
synced_model,
env)
# Add twice because we get two models with the same seed
all_seeds.append(random_seed)
all_seeds.append(random_seed)
all_models += two_models
assert len(all_seeds) == len(all_models)
# Keep track of which perturbations were positive and negative
# Start with negative true because pop() makes us go backwards
is_negative = True
# Add all peturbed models to the queue
while all_models:
perturbed_model = all_models.pop()
seed = all_seeds.pop()
p = mp.Process(target=do_rollouts, args=(args,
[perturbed_model],
[seed],
return_queue,
env,
[is_negative]))
p.start()
processes.append(p)
is_negative = not is_negative
assert len(all_seeds) == 0
# Evaluate the unperturbed model as well
p = mp.Process(target=do_rollouts, args=(args, [synced_model],
['dummy_seed'],
return_queue, env,
['dummy_neg']))
p.start()
processes.append(p)
for p in processes:
p.join()
raw_results = [return_queue.get() for p in processes]
seeds, results, num_frames, neg_list = [flatten(raw_results, index)
for index in [0, 1, 2, 3]]
# Separate the unperturbed results from the perturbed results
_ = unperturbed_index = seeds.index('dummy_seed')
seeds.pop(unperturbed_index)
unperturbed_results = results.pop(unperturbed_index)
_ = num_frames.pop(unperturbed_index)
_ = neg_list.pop(unperturbed_index)
total_num_frames += sum(num_frames)
num_eps += len(results)
synced_model = gradient_update(args, synced_model, results, seeds,
neg_list, num_eps, total_num_frames,
chkpt_dir, unperturbed_results)
if args.variable_ep_len:
args.max_episode_length = int(2*sum(num_frames)/len(num_frames))
| {
"repo_name": "lolz0r/pytorch-es",
"path": "train.py",
"copies": "1",
"size": "12054",
"license": "mit",
"hash": 8557483933046480000,
"line_mean": 38.1363636364,
"line_max": 94,
"alpha_frac": 0.5398208064,
"autogenerated": false,
"ratio": 3.800126103404792,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9834981322916232,
"avg_score": 0.0009931173777118982,
"num_lines": 308
} |
from __future__ import absolute_import, division, print_function
import os
import matplotlib
from matplotlib.backends.backend_qt4 import NavigationToolbar2QT
from ..external.qt import QtCore, QtGui
from ..external.qt.QtGui import QMenu
from ..external.qt.QtCore import Qt, Signal
from ..core.callback_property import add_callback
from .qtutil import get_icon, nonpartial
class GlueToolbar(NavigationToolbar2QT):
pan_begin = Signal()
pan_end = Signal()
mode_activated = Signal()
mode_deactivated = Signal()
def __init__(self, canvas, frame, name=None):
""" Create a new toolbar object
Parameters
----------
        canvas : Matplotlib canvas instance
The drawing canvas to interact with
frame : QWidget
The QT frame that the canvas is embedded within.
"""
self.buttons = {}
self.__active = None
self.basedir = None
NavigationToolbar2QT.__init__(self, canvas, frame)
if name is not None:
self.setWindowTitle(name)
self.setIconSize(QtCore.QSize(25, 25))
self.layout().setSpacing(1)
self.setFocusPolicy(Qt.StrongFocus)
self._idKey = None
# pyside is prone to segfaults if slots hold the only
# reference to a signal, so we hold an extra reference
# see https://bugreports.qt-project.org/browse/PYSIDE-88
self.__signals = []
def _init_toolbar(self):
self.basedir = os.path.join(matplotlib.rcParams['datapath'], 'images')
parent = QtGui.QToolBar.parent(self)
a = QtGui.QAction(get_icon('glue_home'),
'Home', parent)
a.triggered.connect(nonpartial(self.home))
a.setToolTip('Reset original zoom')
a.setShortcut('H')
a.setShortcutContext(Qt.WidgetShortcut)
parent.addAction(a)
self.buttons['HOME'] = a
self.addAction(a)
a = QtGui.QAction(get_icon('glue_filesave'),
'Save', parent)
a.triggered.connect(nonpartial(self.save_figure))
a.setToolTip('Save the figure')
a.setShortcut('Ctrl+Shift+S')
parent.addAction(a)
self.buttons['SAVE'] = a
self.addAction(a)
a = QtGui.QAction(get_icon('glue_back'),
'Back', parent)
a.triggered.connect(nonpartial(self.back))
parent.addAction(a)
self.addAction(a)
self.buttons['BACK'] = a
a.setToolTip('Back to previous view')
a = QtGui.QAction(get_icon('glue_forward'),
'Forward', parent)
a.triggered.connect(nonpartial(self.forward))
a.setToolTip('Forward to next view')
parent.addAction(a)
self.buttons['FORWARD'] = a
self.addAction(a)
a = QtGui.QAction(get_icon('glue_move'),
'Pan', parent)
a.triggered.connect(nonpartial(self.pan))
a.setToolTip('Pan axes with left mouse, zoom with right')
a.setCheckable(True)
a.setShortcut('M')
a.setShortcutContext(Qt.WidgetShortcut)
parent.addAction(a)
self.addAction(a)
self.buttons['PAN'] = a
a = QtGui.QAction(get_icon('glue_zoom_to_rect'),
'Zoom', parent)
a.triggered.connect(nonpartial(self.zoom))
a.setToolTip('Zoom to rectangle')
a.setShortcut('Z')
a.setShortcutContext(Qt.WidgetShortcut)
a.setCheckable(True)
parent.addAction(a)
self.addAction(a)
self.buttons['ZOOM'] = a
#self.adj_window = None
@property
def _active(self):
return self.__active
@_active.setter
def _active(self, value):
if self.__active == value:
return
self.__active = value
if value not in [None, '']:
self.mode_activated.emit()
else:
self.mode_deactivated.emit()
def home(self, *args):
super(GlueToolbar, self).home(*args)
self.canvas.homeButton.emit()
def zoom(self, *args):
self._deactivate_custom_modes()
super(GlueToolbar, self).zoom(*args)
self._update_buttons_checked()
def pan(self, *args):
self._deactivate_custom_modes()
super(GlueToolbar, self).pan(*args)
self._update_buttons_checked()
def _deactivate_custom_modes(self):
if self._idPress is not None:
self._idPress = self.canvas.mpl_disconnect(self._idPress)
if self._idRelease is not None:
self._idRelease = self.canvas.mpl_disconnect(self._idRelease)
if self._idDrag is not None:
self._idDrag = self.canvas.mpl_disconnect(
self._idDrag)
self._idDrag = self.canvas.mpl_connect('motion_notify_event',
self.mouse_move)
if self._idKey is not None:
self._idKey = self.canvas.mpl_disconnect(self._idKey)
self.mode = ''
def add_mode(self, mode):
parent = QtGui.QToolBar.parent(self)
def toggle():
self._custom_mode(mode)
def enable():
# turn on if not
if self._active != mode.mode_id:
self._custom_mode(mode)
action = QtGui.QAction(mode.icon, mode.action_text, parent)
action.triggered.connect(nonpartial(toggle))
parent.addAction(action)
self.__signals.extend([toggle, enable])
if mode.shortcut is not None:
action.setShortcut(mode.shortcut)
action.setShortcutContext(Qt.WidgetShortcut)
action.setToolTip(mode.tool_tip)
action.setCheckable(True)
self.buttons[mode.mode_id] = action
menu_actions = mode.menu_actions()
if len(menu_actions) > 0:
menu = QMenu(self)
for ma in mode.menu_actions():
ma.setParent(self)
menu.addAction(ma)
action.setMenu(menu)
menu.triggered.connect(nonpartial(enable))
self.addAction(action)
# bind action status to mode.enabled
def toggle(state):
action.setVisible(state)
action.setEnabled(state)
add_callback(mode, 'enabled', toggle)
return action
def set_mode(self, mode):
if self._active != mode.mode_id:
self._custom_mode(mode)
def _custom_mode(self, mode):
if self._active == mode.mode_id:
self._active = None
else:
self._active = mode.mode_id
self._deactivate_custom_modes()
if self._active:
self._idPress = self.canvas.mpl_connect(
'button_press_event', mode.press)
self._idDrag = self.canvas.mpl_connect(
'motion_notify_event', mode.move)
self._idRelease = self.canvas.mpl_connect(
'button_release_event', mode.release)
self._idKey = self.canvas.mpl_connect(
'key_press_event', mode.key)
self.mode = mode.action_text
self.canvas.widgetlock(self)
mode.activate()
# allows grabbing of key events before clicking on plot
# XXX qt specific syntax here
try:
self.canvas.setFocus()
except AttributeError:
pass
else:
self.canvas.widgetlock.release(self)
for a in self.canvas.figure.get_axes():
a.set_navigate_mode(None)
self.set_message(self.mode)
self._update_buttons_checked()
def press_pan(self, event):
self.pan_begin.emit()
super(GlueToolbar, self).press_pan(event)
def release_pan(self, event):
self.pan_end.emit()
super(GlueToolbar, self).release_pan(event)
def _update_buttons_checked(self):
for mode in self.buttons:
self.buttons[mode].setChecked(self._active == mode)
def set_message(self, s):
self.emit(QtCore.SIGNAL("message"), s)
parent = QtGui.QToolBar.parent(self)
if parent is None:
return
sb = parent.statusBar()
if sb is None:
return
sb.showMessage(s.replace(', ', '\n'))
| {
"repo_name": "JudoWill/glue",
"path": "glue/qt/glue_toolbar.py",
"copies": "1",
"size": "8275",
"license": "bsd-3-clause",
"hash": -1227912194831938800,
"line_mean": 31.32421875,
"line_max": 78,
"alpha_frac": 0.5749848943,
"autogenerated": false,
"ratio": 4.005324298160697,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0001220703125,
"num_lines": 256
} |
from __future__ import absolute_import, division, print_function
import os
import networkx as nx
from math import log
from toolz import pluck
from .convert import convert, ooc_types
from .append import append
from .utils import cls_name
# Write out as dot
def dot_graph(filename='conversions'):
# Edges from Convert
dg = nx.DiGraph()
for a, b in convert.graph.edges():
cost = convert.graph.edge[a][b]['cost']
dg.add_edge(cls_name(a), cls_name(b),
cost=cost,
penwidth=max(log(1./(cost + 0.06)), 1))
# Edges from Append
for a, b in append.funcs:
if b is not object and a != b:
dg.add_edge(cls_name(b), cls_name(a), color='blue')
# Color edges
for n in convert.graph.nodes() + list(pluck(0, append.funcs)):
if issubclass(n, tuple(ooc_types)):
dg.node[cls_name(n)]['color'] = 'red'
# Convert to pydot
p = nx.to_pydot(dg)
p.set_overlap(False)
p.set_splines(True)
with open(filename + '.dot', 'w') as f:
f.write(p.to_string())
os.system('neato -Tpdf %s.dot -o %s.pdf' % (filename, filename))
print("Writing graph to %s.pdf" % filename)
os.system('neato -Tpng %s.dot -o %s.png' % (filename, filename))
print("Writing graph to %s.png" % filename)
if __name__ == '__main__':
dot_graph()
| {
"repo_name": "cowlicks/odo",
"path": "odo/dot.py",
"copies": "10",
"size": "1363",
"license": "bsd-3-clause",
"hash": 3820760647684321300,
"line_mean": 26.26,
"line_max": 68,
"alpha_frac": 0.5964783566,
"autogenerated": false,
"ratio": 3.140552995391705,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.002685185185185185,
"num_lines": 50
} |
from __future__ import absolute_import, division, print_function
import os
import numpy as np
from dynd import nd
import datashape
from . import DDesc, Capabilities
from .dynd_data_descriptor import DyND_DDesc
from .stream_data_descriptor import Stream_DDesc
from ..optional_packages import netCDF4_is_here
if netCDF4_is_here:
import netCDF4
def get_node(f, dp):
"""Get a node in `f` file/group with a `dp` datapath (can be nested)."""
if dp.startswith('/'): dp = dp[1:]
idx = dp.find('/')
if idx >= 0:
group = f.groups[dp[:idx]]
return get_node(group, dp[idx+1:])
return f.variables[dp]
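# Illustrative example (hypothetical file layout): for a file with a group
# "obs" containing a variable "flux", get_node(f, '/obs/flux') strips the
# leading slash, recurses into f.groups['obs'] and finally returns
# f.groups['obs'].variables['flux'].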
class netCDF4_DDesc(DDesc):
"""
A Blaze data descriptor which exposes a netCDF4 dataset.
"""
def __init__(self, path, datapath, mode='r'):
self.path = path
self.datapath = datapath
self.mode = mode
@property
def dshape(self):
# This cannot be cached because the Array can change the dshape
with netCDF4.Dataset(self.path, mode='r') as f:
dset = get_node(f, self.datapath)
odshape = datashape.from_numpy(dset.shape, dset.dtype)
return odshape
@property
def capabilities(self):
"""The capabilities for the netCDF4 arrays."""
with netCDF4.Dataset(self.path, mode='r') as f:
dset = get_node(f, self.datapath)
appendable = isinstance(dset, netCDF4.Variable)
caps = Capabilities(
# netCDF4 arrays can be updated
immutable = False,
# netCDF4 arrays are concrete
deferred = False,
# netCDF4 arrays are persistent
persistent = True,
# netCDF4 arrays can be appended efficiently
appendable = appendable,
# netCDF4 arrays cannot be queried efficiently
queryable = False,
remote = False,
)
return caps
def dynd_arr(self):
        # Position at the beginning of the file
with netCDF4.Dataset(self.path, mode='r') as f:
dset = get_node(f, self.datapath)
dset = nd.array(dset[:], dtype=dset.dtype)
return dset
def __array__(self):
with netCDF4.Dataset(self.path, mode='r') as f:
dset = get_node(f, self.datapath)
dset = dset[:]
return dset
def __len__(self):
with netCDF4.Dataset(self.path, mode='r') as f:
dset = get_node(f, self.datapath)
arrlen = len(dset)
return arrlen
def __getitem__(self, key):
with netCDF4.Dataset(self.path, mode='r') as f:
dset = get_node(f, self.datapath)
# The returned arrays are temporary buffers,
# so must be flagged as readonly.
dyndarr = nd.asarray(dset[key], access='readonly')
return DyND_DDesc(dyndarr)
def __setitem__(self, key, value):
# netCDF4 arrays can be updated
with netCDF4.Dataset(self.path, mode=self.mode) as f:
dset = get_node(f, self.datapath)
dset[key] = value
def __iter__(self):
f = netCDF4.Dataset(self.path, mode='r')
dset = get_node(f, self.datapath)
# Get rid of the leading dimension on which we iterate
dshape = datashape.from_numpy(dset.shape[1:], dset.dtype)
for el in dset:
if hasattr(el, "nrow"):
yield DyND_DDesc(nd.array(el[:], type=str(dshape)))
else:
yield DyND_DDesc(nd.array(el, type=str(dshape)))
f.close()
def getattr(self, name):
with netCDF4.Dataset(self.path, mode=self.mode) as f:
dset = get_node(f, self.datapath)
if hasattr(dset, 'cols'):
return DyND_DDesc(
nd.asarray(getattr(dset.cols, name)[:],
access='readonly'))
else:
raise IndexError("not an netCDF4 compound dataset")
def append(self, values):
"""Append a list of values."""
with netCDF4.Dataset(self.path, mode=self.mode) as f:
dset = get_node(f, self.datapath)
dset[len(dset):] = values
def remove(self):
"""Remove the persistent storage."""
os.unlink(self.path)
| {
"repo_name": "sethkontny/blaze",
"path": "blaze/datadescriptor/netcdf4_data_descriptor.py",
"copies": "1",
"size": "4286",
"license": "bsd-3-clause",
"hash": 5152880818035013000,
"line_mean": 32.7480314961,
"line_max": 76,
"alpha_frac": 0.5732617825,
"autogenerated": false,
"ratio": 3.7012089810017272,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47744707635017275,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import os
import numpy as np
from dynd import nd
import datashape
from . import DDesc, Capabilities
from ..optional_packages import tables_is_here
if tables_is_here:
import tables as tb
from .dynd_data_descriptor import DyND_DDesc
# WARNING! PyTables always returns NumPy arrays when doing indexing
# operations. This is why DyND_DDesc is used for returning
# the values here.
def hdf5_descriptor_iter(h5arr):
for i in range(len(h5arr)):
# PyTables doesn't have a convenient way to avoid collapsing
# to a scalar, this is a way to avoid that
el = np.array(h5arr[i], dtype=h5arr.dtype)
yield DyND_DDesc(nd.array(el))
h5arr._v_file.close()
class HDF5_DDesc(DDesc):
"""
A Blaze data descriptor which exposes a HDF5 dataset.
"""
def __init__(self, path, datapath, mode='r', filters=None):
self.path = path
self.datapath = datapath
self.mode = mode
self.filters = filters
@property
def dshape(self):
# This cannot be cached because the Array can change the dshape
with tb.open_file(self.path, mode='r') as f:
h5arr = f.get_node(self.datapath)
odshape = datashape.from_numpy(h5arr.shape, h5arr.dtype)
return odshape
@property
def capabilities(self):
"""The capabilities for the HDF5 arrays."""
with tb.open_file(self.path, mode='r') as f:
h5arr = f.get_node(self.datapath)
            appendable = isinstance(h5arr, (tb.EArray, tb.Table))
caps = Capabilities(
# HDF5 arrays can be updated
immutable = False,
# HDF5 arrays are concrete
deferred = False,
# HDF5 arrays are persistent
persistent = True,
# HDF5 arrays can be appended efficiently (EArrays and Tables)
appendable = appendable,
remote = False,
)
return caps
def dynd_arr(self):
        # Position at the beginning of the file
with tb.open_file(self.path, mode='r') as f:
h5arr = f.get_node(self.datapath)
h5arr = nd.array(h5arr[:], dtype=h5arr.dtype)
return h5arr
def __array__(self):
with tb.open_file(self.path, mode='r') as f:
h5arr = f.get_node(self.datapath)
h5arr = h5arr[:]
return h5arr
def __len__(self):
with tb.open_file(self.path, mode='r') as f:
h5arr = f.get_node(self.datapath)
arrlen = len(h5arr)
return arrlen
def __getitem__(self, key):
with tb.open_file(self.path, mode='r') as f:
h5arr = f.get_node(self.datapath)
# The returned arrays are temporary buffers,
# so must be flagged as readonly.
dyndarr = nd.asarray(h5arr[key], access='readonly')
return DyND_DDesc(dyndarr)
def __setitem__(self, key, value):
# HDF5 arrays can be updated
with tb.open_file(self.path, mode=self.mode) as f:
h5arr = f.get_node(self.datapath)
h5arr[key] = value
def __iter__(self):
f = tb.open_file(self.path, mode='r')
h5arr = f.get_node(self.datapath)
return hdf5_descriptor_iter(h5arr)
def append(self, values):
"""Append a list of values."""
shape, dtype = datashape.to_numpy(self.dshape)
values_arr = np.array(values, dtype=dtype)
shape_vals = values_arr.shape
if len(shape_vals) < len(shape):
shape_vals = (1,) + shape_vals
if len(shape_vals) != len(shape):
raise ValueError("shape of values is not compatible")
# Now, do the actual append
with tb.open_file(self.path, mode=self.mode) as f:
h5arr = f.get_node(self.datapath)
h5arr.append(values_arr.reshape(shape_vals))
def remove(self):
"""Remove the persistent storage."""
os.unlink(self.path)
| {
"repo_name": "talumbau/blaze",
"path": "blaze/datadescriptor/hdf5_data_descriptor.py",
"copies": "1",
"size": "4019",
"license": "bsd-3-clause",
"hash": 6067877637165708000,
"line_mean": 33.0593220339,
"line_max": 74,
"alpha_frac": 0.5956705648,
"autogenerated": false,
"ratio": 3.569271758436945,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9650048908808265,
"avg_score": 0.0029786828857359202,
"num_lines": 118
} |
from __future__ import absolute_import, division, print_function
import os
import numpy as np
from matplotlib import cm
from qtpy import QtWidgets, QtGui
from glue.core.util import colorize_subsets, facet_subsets
from glue.utils.qt import load_ui
from glue.utils.qt.widget_properties import (ButtonProperty, FloatLineProperty,
ValueProperty)
from glue.utils.array import pretty_number
from glue.utils import Pointer
from glue.utils.qt import cmap2pixmap
# We do the following import to register the custom Qt Widget there
from glue.dialogs.common.qt import component_selector # pylint: disable=W0611
__all__ = ['SubsetFacet']
class SubsetFacet(QtWidgets.QDialog):
log = ButtonProperty('ui.checkbox_log')
vmin = FloatLineProperty('ui.value_min')
vmax = FloatLineProperty('ui.value_max')
steps = ValueProperty('ui.value_n_subsets')
data = Pointer('ui.component_selector.data')
component = Pointer('ui.component_selector.component')
def __init__(self, collect, default=None, parent=None):
"""Create a new dialog for subset faceting
:param collect: The :class:`~glue.core.data_collection.DataCollection` to use
:param default: The default dataset in the collection (optional)
"""
super(SubsetFacet, self).__init__(parent=parent)
self.ui = load_ui('subset_facet.ui', self,
directory=os.path.dirname(__file__))
self.ui.setWindowTitle("Subset Facet")
self._collect = collect
self.ui.component_selector.setup(self._collect)
if default is not None:
self.ui.component_selector.data = default
val = QtGui.QDoubleValidator(-1e100, 1e100, 4, None)
self.ui.component_selector.component_changed.connect(self._set_limits)
combo = self.ui.color_scale
for cmap in [cm.cool, cm.RdYlBu, cm.RdYlGn, cm.RdBu, cm.Purples]:
combo.addItem(QtGui.QIcon(cmap2pixmap(cmap)), cmap.name, cmap)
def _set_limits(self):
data = self.ui.component_selector.data
cid = self.ui.component_selector.component
vals = data[cid]
wmin = self.ui.value_min
wmax = self.ui.value_max
wmin.setText(pretty_number(np.nanmin(vals)))
wmax.setText(pretty_number(np.nanmax(vals)))
@property
def cmap(self):
combo = self.ui.color_scale
index = combo.currentIndex()
return combo.itemData(index)
def _apply(self):
try:
lo, hi = self.vmin, self.vmax
except ValueError:
return # limits not set. Abort
if not np.isfinite(lo) or not np.isfinite(hi):
return
subsets = facet_subsets(self._collect, self.component, lo=lo, hi=hi,
steps=self.steps, log=self.log)
colorize_subsets(subsets, self.cmap)
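    # Illustrative example (hypothetical settings): with vmin=0, vmax=10,
    # steps=5 and log unchecked, facet_subsets is expected to create five
    # contiguous subsets spanning 0-10 on the chosen component, which
    # colorize_subsets then colours along the selected colormap.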
@classmethod
def facet(cls, collect, default=None, parent=None):
"""Class method to create facted subsets
The arguments are the same as __init__
"""
self = cls(collect, parent=parent, default=default)
value = self.exec_()
if value == QtWidgets.QDialog.Accepted:
self._apply()
| {
"repo_name": "saimn/glue",
"path": "glue/dialogs/subset_facet/qt/subset_facet.py",
"copies": "4",
"size": "3234",
"license": "bsd-3-clause",
"hash": -6001564366702185000,
"line_mean": 33.0421052632,
"line_max": 85,
"alpha_frac": 0.6428571429,
"autogenerated": false,
"ratio": 3.8091872791519434,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6452044422051944,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import os
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
import json
from PIL import Image
from ..datasets import OTB
from ..utils.metrics import rect_iou, center_error
from ..utils.viz import show_frame
class ExperimentOTB(object):
r"""Experiment pipeline and evaluation toolkit for OTB dataset.
Args:
root_dir (string): Root directory of OTB dataset.
version (integer or string): Specify the benchmark version, specify as one of
``2013``, ``2015``, ``tb50`` and ``tb100``. Default is ``2015``.
result_dir (string, optional): Directory for storing tracking
results. Default is ``./results``.
report_dir (string, optional): Directory for storing performance
evaluation results. Default is ``./reports``.
"""
def __init__(self, root_dir, version=2015,
result_dir='results', report_dir='reports'):
super(ExperimentOTB, self).__init__()
self.dataset = OTB(root_dir, version, download=True)
self.result_dir = os.path.join(result_dir, 'OTB' + str(version))
self.report_dir = os.path.join(report_dir, 'OTB' + str(version))
# as nbins_iou increases, the success score
# converges to the average overlap (AO)
self.nbins_iou = 21
self.nbins_ce = 51
def run(self, tracker, visualize=False):
print('Running tracker %s on %s...' % (
tracker.name, type(self.dataset).__name__))
# loop over the complete dataset
for s, (img_files, anno) in enumerate(self.dataset):
seq_name = self.dataset.seq_names[s]
print('--Sequence %d/%d: %s' % (s + 1, len(self.dataset), seq_name))
# skip if results exist
record_file = os.path.join(
self.result_dir, tracker.name, '%s.txt' % seq_name)
if os.path.exists(record_file):
print(' Found results, skipping', seq_name)
continue
# tracking loop
boxes, times = tracker.track(
img_files, anno[0, :], visualize=visualize)
assert len(boxes) == len(anno)
# record results
self._record(record_file, boxes, times)
def report(self, tracker_names, plot_curves=True):
assert isinstance(tracker_names, (list, tuple))
# assume tracker_names[0] is your tracker
report_dir = os.path.join(self.report_dir, tracker_names[0])
if not os.path.isdir(report_dir):
os.makedirs(report_dir)
report_file = os.path.join(report_dir, 'performance.json')
performance = {}
for name in tracker_names:
print('Evaluating', name)
seq_num = len(self.dataset)
succ_curve = np.zeros((seq_num, self.nbins_iou))
prec_curve = np.zeros((seq_num, self.nbins_ce))
speeds = np.zeros(seq_num)
performance.update({name: {
'overall': {},
'seq_wise': {}}})
for s, (_, anno) in enumerate(self.dataset):
seq_name = self.dataset.seq_names[s]
record_file = os.path.join(
self.result_dir, name, '%s.txt' % seq_name)
boxes = np.loadtxt(record_file, delimiter=',')
boxes[0] = anno[0]
                if len(boxes) != len(anno):
                    print('warning: %s annotations do not match boxes' % seq_name)
len_min = min(len(boxes),len(anno))
boxes = boxes[:len_min]
anno = anno[:len_min]
assert len(boxes) == len(anno)
ious, center_errors = self._calc_metrics(boxes, anno)
succ_curve[s], prec_curve[s] = self._calc_curves(ious, center_errors)
# calculate average tracking speed
time_file = os.path.join(
self.result_dir, name, 'times/%s_time.txt' % seq_name)
if os.path.isfile(time_file):
times = np.loadtxt(time_file)
times = times[times > 0]
if len(times) > 0:
speeds[s] = np.mean(1. / times)
# store sequence-wise performance
performance[name]['seq_wise'].update({seq_name: {
'success_curve': succ_curve[s].tolist(),
'precision_curve': prec_curve[s].tolist(),
'success_score': np.mean(succ_curve[s]),
'precision_score': prec_curve[s][20],
'success_rate': succ_curve[s][self.nbins_iou // 2],
'speed_fps': speeds[s] if speeds[s] > 0 else -1}})
succ_curve = np.mean(succ_curve, axis=0)
prec_curve = np.mean(prec_curve, axis=0)
succ_score = np.mean(succ_curve)
prec_score = prec_curve[20]
succ_rate = succ_curve[self.nbins_iou // 2]
if np.count_nonzero(speeds) > 0:
avg_speed = np.sum(speeds) / np.count_nonzero(speeds)
else:
avg_speed = -1
# store overall performance
performance[name]['overall'].update({
'success_curve': succ_curve.tolist(),
'precision_curve': prec_curve.tolist(),
'success_score': succ_score,
'precision_score': prec_score,
'success_rate': succ_rate,
'speed_fps': avg_speed})
# report the performance
with open(report_file, 'w') as f:
json.dump(performance, f, indent=4)
# plot precision and success curves
if plot_curves:
self.plot_curves(tracker_names)
return performance
def show(self, tracker_names, seq_names=None, play_speed=1):
if seq_names is None:
seq_names = self.dataset.seq_names
elif isinstance(seq_names, str):
seq_names = [seq_names]
assert isinstance(tracker_names, (list, tuple))
assert isinstance(seq_names, (list, tuple))
play_speed = int(round(play_speed))
assert play_speed > 0
for s, seq_name in enumerate(seq_names):
print('[%d/%d] Showing results on %s...' % (
s + 1, len(seq_names), seq_name))
# load all tracking results
records = {}
for name in tracker_names:
record_file = os.path.join(
self.result_dir, name, '%s.txt' % seq_name)
records[name] = np.loadtxt(record_file, delimiter=',')
# loop over the sequence and display results
img_files, anno = self.dataset[seq_name]
for f, img_file in enumerate(img_files):
if not f % play_speed == 0:
continue
image = Image.open(img_file)
boxes = [anno[f]] + [
records[name][f] for name in tracker_names]
show_frame(image, boxes,
legends=['GroundTruth'] + tracker_names,
colors=['w', 'r', 'g', 'b', 'c', 'm', 'y',
'orange', 'purple', 'brown', 'pink'])
def _record(self, record_file, boxes, times):
# record bounding boxes
record_dir = os.path.dirname(record_file)
if not os.path.isdir(record_dir):
os.makedirs(record_dir)
np.savetxt(record_file, boxes, fmt='%.3f', delimiter=',')
while not os.path.exists(record_file):
print('warning: recording failed, retrying...')
np.savetxt(record_file, boxes, fmt='%.3f', delimiter=',')
print(' Results recorded at', record_file)
# record running times
time_dir = os.path.join(record_dir, 'times')
if not os.path.isdir(time_dir):
os.makedirs(time_dir)
time_file = os.path.join(time_dir, os.path.basename(
record_file).replace('.txt', '_time.txt'))
np.savetxt(time_file, times, fmt='%.8f')
def _calc_metrics(self, boxes, anno):
# can be modified by children classes
ious = rect_iou(boxes, anno)
center_errors = center_error(boxes, anno)
return ious, center_errors
def _calc_curves(self, ious, center_errors):
ious = np.asarray(ious, float)[:, np.newaxis]
center_errors = np.asarray(center_errors, float)[:, np.newaxis]
thr_iou = np.linspace(0, 1, self.nbins_iou)[np.newaxis, :]
thr_ce = np.arange(0, self.nbins_ce)[np.newaxis, :]
bin_iou = np.greater(ious, thr_iou)
bin_ce = np.less_equal(center_errors, thr_ce)
succ_curve = np.mean(bin_iou, axis=0)
prec_curve = np.mean(bin_ce, axis=0)
return succ_curve, prec_curve
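    # Illustrative shapes (hypothetical sequence of N frames): bin_iou is
    # (N, 21) and bin_ce is (N, 51); averaging over frames gives a success
    # curve of length 21 (fraction of frames whose IoU exceeds each threshold)
    # and a precision curve of length 51 (fraction of frames whose center
    # error falls within each pixel threshold).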
def plot_curves(self, tracker_names):
# assume tracker_names[0] is your tracker
report_dir = os.path.join(self.report_dir, tracker_names[0])
assert os.path.exists(report_dir), \
            'No reports found. Run "report" first ' \
            'before plotting curves.'
report_file = os.path.join(report_dir, 'performance.json')
assert os.path.exists(report_file), \
            'No reports found. Run "report" first ' \
            'before plotting curves.'
# load pre-computed performance
with open(report_file) as f:
performance = json.load(f)
succ_file = os.path.join(report_dir, 'success_plots.png')
prec_file = os.path.join(report_dir, 'precision_plots.png')
key = 'overall'
# markers
markers = ['-', '--', '-.']
markers = [c + m for m in markers for c in [''] * 10]
# sort trackers by success score
tracker_names = list(performance.keys())
succ = [t[key]['success_score'] for t in performance.values()]
inds = np.argsort(succ)[::-1]
tracker_names = [tracker_names[i] for i in inds]
# plot success curves
thr_iou = np.linspace(0, 1, self.nbins_iou)
fig, ax = plt.subplots()
lines = []
legends = []
for i, name in enumerate(tracker_names):
line, = ax.plot(thr_iou,
performance[name][key]['success_curve'],
markers[i % len(markers)])
lines.append(line)
legends.append('%s: [%.3f]' % (name, performance[name][key]['success_score']))
matplotlib.rcParams.update({'font.size': 7.4})
legend = ax.legend(lines, legends, loc='center left',
bbox_to_anchor=(1, 0.5))
matplotlib.rcParams.update({'font.size': 9})
ax.set(xlabel='Overlap threshold',
ylabel='Success rate',
xlim=(0, 1), ylim=(0, 1),
title='Success plots of OPE')
ax.grid(True)
fig.tight_layout()
print('Saving success plots to', succ_file)
fig.savefig(succ_file,
bbox_extra_artists=(legend,),
bbox_inches='tight',
dpi=300)
# sort trackers by precision score
tracker_names = list(performance.keys())
prec = [t[key]['precision_score'] for t in performance.values()]
inds = np.argsort(prec)[::-1]
tracker_names = [tracker_names[i] for i in inds]
# plot precision curves
thr_ce = np.arange(0, self.nbins_ce)
fig, ax = plt.subplots()
lines = []
legends = []
for i, name in enumerate(tracker_names):
line, = ax.plot(thr_ce,
performance[name][key]['precision_curve'],
markers[i % len(markers)])
lines.append(line)
legends.append('%s: [%.3f]' % (name, performance[name][key]['precision_score']))
matplotlib.rcParams.update({'font.size': 7.4})
legend = ax.legend(lines, legends, loc='center left',
bbox_to_anchor=(1, 0.5))
matplotlib.rcParams.update({'font.size': 9})
ax.set(xlabel='Location error threshold',
ylabel='Precision',
xlim=(0, thr_ce.max()), ylim=(0, 1),
title='Precision plots of OPE')
ax.grid(True)
fig.tight_layout()
print('Saving precision plots to', prec_file)
fig.savefig(prec_file, dpi=300)
| {
"repo_name": "got-10k/toolkit",
"path": "got10k/experiments/otb.py",
"copies": "1",
"size": "12541",
"license": "mit",
"hash": 4123912430474350000,
"line_mean": 39.7175324675,
"line_max": 92,
"alpha_frac": 0.5349653138,
"autogenerated": false,
"ratio": 3.8375152998776008,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48724806136776005,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import os
import numpy as np
import pandas as pd
from gensim.models import KeyedVectors
from string import ascii_lowercase, punctuation
# Dataset
PROJECT_NAME = "Quora Question Pairs"
PROJECT_FOLDER_PATH = os.path.join(os.path.expanduser("~"), "Documents/Dataset", PROJECT_NAME)
TRAIN_FILE_PATH = os.path.join(PROJECT_FOLDER_PATH, "train.csv")
TEST_FILE_PATH = os.path.join(PROJECT_FOLDER_PATH, "test.csv")
EMBEDDING_FILE_PATH = os.path.join(PROJECT_FOLDER_PATH, "GoogleNews-vectors-negative300.bin")
def correct_typo(word, word_to_index_dict, known_typo_dict, min_word_length=8):
def get_candidate_word_list(word):
# https://www.kaggle.com/cpmpml/spell-checker-using-word2vec/notebook
left_word_with_right_word_list = [(word[:index], word[index:]) for index in range(len(word) + 1)]
deleted_word_list = [left_word + right_word[1:] for left_word, right_word in left_word_with_right_word_list if right_word]
transposed_word_list = [left_word + right_word[1] + right_word[0] + right_word[2:] for left_word, right_word in left_word_with_right_word_list if len(right_word) > 1]
replaced_word_list = [left_word + character + right_word[1:] for left_word, right_word in left_word_with_right_word_list if right_word for character in ascii_lowercase]
inserted_word_list = [left_word + character + right_word for left_word, right_word in left_word_with_right_word_list for character in ascii_lowercase]
return list(set(deleted_word_list + transposed_word_list + replaced_word_list + inserted_word_list))
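    # The helper above enumerates every string within edit distance one of `word`
    # (single deletions, adjacent transpositions, single-letter replacements and
    # insertions), in the spirit of the word2vec spell checker linked above.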
if len(word) < min_word_length or word in word_to_index_dict:
return word
if word in known_typo_dict:
return known_typo_dict[word]
candidate_word_list = get_candidate_word_list(word)
candidate_word_with_index_array = np.array([(candidate_word, word_to_index_dict[candidate_word]) for candidate_word in candidate_word_list if candidate_word in word_to_index_dict])
if len(candidate_word_with_index_array) == 0:
selected_candidate_word = word
else:
        selected_candidate_word = candidate_word_with_index_array[np.argmin(candidate_word_with_index_array[:, -1].astype(int))][0]
print("Replacing {} with {} ...".format(word, selected_candidate_word))
known_typo_dict[word] = selected_candidate_word
return selected_candidate_word
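# Illustrative, hypothetical usage of correct_typo: with a vocabulary such as
# {"machine": 12, ...} (indices made up here), correct_typo("machinee", vocab, {})
# builds all edit-distance-one candidates, keeps those present in the vocabulary,
# and returns the one with the smallest word2vec index -- for GoogleNews vectors
# that roughly means the most frequent word, here "machine".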
def clean_sentence(original_sentence, word_to_index_dict, known_typo_dict):
# https://www.kaggle.com/currie32/quora-question-pairs/the-importance-of-cleaning-text
try:
# Convert to lower case
cleaned_sentence = " ".join(str(original_sentence).lower().split())
# Remove punctuation
cleaned_sentence = "".join([character for character in cleaned_sentence if character not in punctuation])
# Correct simple typos
cleaned_sentence = " ".join([correct_typo(word, word_to_index_dict, known_typo_dict) for word in cleaned_sentence.split()])
cleaned_sentence = " ".join([word for word in cleaned_sentence.split()])
return cleaned_sentence
except Exception as exception:
print("Exception for {}: {}".format(original_sentence, exception))
return original_sentence
def process_file(original_file_path, word_to_index_dict, known_typo_dict):
print("Loading {} ...".format(original_file_path))
file_content = pd.read_csv(original_file_path, encoding="utf-8")
print("Cleaning sentences ...")
file_content["question1"] = file_content["question1"].apply(lambda original_sentence: clean_sentence(original_sentence, word_to_index_dict, known_typo_dict))
file_content["question2"] = file_content["question2"].apply(lambda original_sentence: clean_sentence(original_sentence, word_to_index_dict, known_typo_dict))
print("Saving processed file ...")
file_content.to_csv(original_file_path, index=False)
def run():
print("Initiating word2vec ...")
word2vec = KeyedVectors.load_word2vec_format(EMBEDDING_FILE_PATH, binary=True)
word_to_index_dict = dict([(word, index) for index, word in enumerate(word2vec.index2word)])
print("word2vec contains {} unique words.".format(len(word_to_index_dict)))
print("Processing text files ...")
known_typo_dict = {}
process_file(TRAIN_FILE_PATH, word_to_index_dict, known_typo_dict)
process_file(TEST_FILE_PATH, word_to_index_dict, known_typo_dict)
print("All done!")
if __name__ == "__main__":
run()
| {
"repo_name": "nixingyang/Kaggle-Face-Verification",
"path": "Quora Question Pairs/text_cleaning.py",
"copies": "1",
"size": "4509",
"license": "mit",
"hash": 4252468084966441500,
"line_mean": 51.4302325581,
"line_max": 184,
"alpha_frac": 0.7039254824,
"autogenerated": false,
"ratio": 3.3775280898876403,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9563534591333043,
"avg_score": 0.0035837961909193847,
"num_lines": 86
} |
from __future__ import absolute_import, division, print_function
import os
import numpy as np
from qtpy.QtCore import Qt
from qtpy import QtCore, QtGui, QtWidgets
from matplotlib.colors import ColorConverter
from glue.utils.qt import get_qapp
from glue.config import viewer_tool
from glue.core.layer_artist import LayerArtistBase
from glue.core import message as msg
from glue.core import Data
from glue.utils.qt import load_ui
from glue.viewers.common.qt.data_viewer import DataViewer
from glue.viewers.common.qt.toolbar import BasicToolbar
from glue.viewers.common.qt.tool import CheckableTool
from glue.core.subset import ElementSubsetState
from glue.core.edit_subset_mode import EditSubsetMode
from glue.core.state import lookup_class_with_patches
from glue.utils.colors import alpha_blend_colors
from glue.utils.qt import mpl_to_qt4_color, messagebox_on_error
from glue.core.exceptions import IncompatibleAttribute
__all__ = ['TableViewer', 'TableLayerArtist']
COLOR_CONVERTER = ColorConverter()
class DataTableModel(QtCore.QAbstractTableModel):
def __init__(self, table_viewer):
super(DataTableModel, self).__init__()
if table_viewer.data.ndim != 1:
raise ValueError("Can only use Table widget for 1D data")
self._table_viewer = table_viewer
self._data = table_viewer.data
self.show_hidden = False
self.order = np.arange(self._data.shape[0])
def data_changed(self):
top_left = self.index(0, 0)
bottom_right = self.index(self.columnCount(), self.rowCount())
self.dataChanged.emit(top_left, bottom_right)
self.layoutChanged.emit()
@property
def columns(self):
if self.show_hidden:
return self._data.components
else:
return self._data.visible_components
def columnCount(self, index=None):
return len(self.columns)
def rowCount(self, index=None):
# Qt bug: Crashes on tables bigger than this
return min(self._data.size, 71582788)
def headerData(self, section, orientation, role):
if role != Qt.DisplayRole:
return None
if orientation == Qt.Horizontal:
column_name = self.columns[section].label
units = self._data.get_component(self.columns[section]).units
if units != '':
column_name += "\n{0}".format(units)
return column_name
elif orientation == Qt.Vertical:
return str(self.order[section])
def data(self, index, role):
if not index.isValid():
return None
if role == Qt.DisplayRole:
c = self.columns[index.column()]
idx = self.order[index.row()]
comp = self._data.get_component(c)
if comp.categorical:
comp = comp.labels
else:
comp = comp.data
if isinstance(comp[idx], bytes):
return comp[idx].decode('ascii')
else:
return str(comp[idx])
elif role == Qt.BackgroundRole:
idx = self.order[index.row()]
# Find all subsets that this index is part of
colors = []
for layer_artist in self._table_viewer.layers[::-1]:
if isinstance(layer_artist.layer, Data):
continue
if layer_artist.visible:
subset = layer_artist.layer
try:
if subset.to_mask(view=slice(idx, idx + 1))[0]:
colors.append(subset.style.color)
except IncompatibleAttribute as exc:
layer_artist.disable_invalid_attributes(*exc.args)
else:
layer_artist.enabled = True
# Blend the colors using alpha blending
if len(colors) > 0:
color = alpha_blend_colors(colors, additional_alpha=0.5)
color = mpl_to_qt4_color(color)
return QtGui.QBrush(color)
def sort(self, column, ascending):
c = self.columns[column]
comp = self._data.get_component(c)
if comp.categorical:
self.order = np.argsort(comp.labels)
else:
self.order = np.argsort(comp.data)
if ascending == Qt.DescendingOrder:
self.order = self.order[::-1]
self.layoutChanged.emit()
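    # Note: sort() never reorders the underlying glue Data; it only stores the
    # argsort permutation in self.order, which data() and headerData() use to map
    # view rows back to data rows.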
class TableLayerArtist(LayerArtistBase):
def __init__(self, layer, table_viewer):
self._table_viewer = table_viewer
super(TableLayerArtist, self).__init__(layer)
def redraw(self):
self._table_viewer.model.data_changed()
def update(self):
pass
def clear(self):
pass
@viewer_tool
class RowSelectTool(CheckableTool):
tool_id = 'table:rowselect'
icon = 'glue_row_select'
action_text = 'Select row(s)'
tool_tip = ('Select rows by clicking on rows and pressing enter '
'once the selection is ready to be applied')
status_tip = ('CLICK to select, press ENTER to finalize selection, '
'ALT+CLICK or ALT+UP/DOWN to apply selection immediately')
def __init__(self, viewer):
super(RowSelectTool, self).__init__(viewer)
self.deactivate()
def activate(self):
self.viewer.ui.table.setSelectionMode(QtWidgets.QAbstractItemView.ExtendedSelection)
def deactivate(self):
self.viewer.ui.table.setSelectionMode(QtWidgets.QAbstractItemView.NoSelection)
self.viewer.ui.table.clearSelection()
class TableViewWithSelectionSignal(QtWidgets.QTableView):
selection_changed = QtCore.Signal()
def selectionChanged(self, *args, **kwargs):
self.selection_changed.emit()
super(TableViewWithSelectionSignal, self).selectionChanged(*args, **kwargs)
class TableViewer(DataViewer):
LABEL = "Table Viewer"
_toolbar_cls = BasicToolbar
tools = ['table:rowselect']
def __init__(self, session, parent=None, widget=None):
super(TableViewer, self).__init__(session, parent)
self.ui = load_ui('data_viewer.ui',
directory=os.path.dirname(__file__))
self.setCentralWidget(self.ui)
hdr = self.ui.table.horizontalHeader()
hdr.setStretchLastSection(True)
hdr.setSectionResizeMode(hdr.Interactive)
hdr = self.ui.table.verticalHeader()
hdr.setSectionResizeMode(hdr.Interactive)
self.model = None
self.ui.table.selection_changed.connect(self.selection_changed)
def selection_changed(self):
app = get_qapp()
if app.queryKeyboardModifiers() == Qt.AltModifier:
self.finalize_selection(clear=False)
def keyPressEvent(self, event):
if self.toolbar.active_tool is self.toolbar.tools['table:rowselect']:
if event.key() in [Qt.Key_Enter, Qt.Key_Return]:
self.finalize_selection()
super(TableViewer, self).keyPressEvent(event)
def finalize_selection(self, clear=True):
model = self.ui.table.selectionModel()
selected_rows = [self.model.order[x.row()] for x in model.selectedRows()]
subset_state = ElementSubsetState(indices=selected_rows, data=self.data)
mode = EditSubsetMode()
mode.update(self._data, subset_state, focus_data=self.data)
if clear:
# We block the signals here to make sure that we don't update
# the subset again once the selection is cleared.
self.ui.table.blockSignals(True)
self.ui.table.clearSelection()
self.ui.table.blockSignals(False)
def register_to_hub(self, hub):
super(TableViewer, self).register_to_hub(hub)
def dfilter(x):
return x.sender.data is self.data
hub.subscribe(self, msg.SubsetCreateMessage,
handler=self._refresh,
filter=dfilter)
hub.subscribe(self, msg.SubsetUpdateMessage,
handler=self._refresh,
filter=dfilter)
hub.subscribe(self, msg.SubsetDeleteMessage,
handler=self._refresh,
filter=dfilter)
hub.subscribe(self, msg.DataUpdateMessage,
handler=self._refresh,
filter=dfilter)
hub.subscribe(self, msg.ComponentsChangedMessage,
handler=self._refresh,
filter=dfilter)
hub.subscribe(self, msg.NumericalDataChangedMessage,
handler=self._refresh,
filter=dfilter)
def unregister(self, hub):
super(TableViewer, self).unregister(hub)
hub.unsubscribe_all(self)
def _refresh(self, msg=None):
self._sync_layers()
self.model.data_changed()
def _sync_layers(self):
for layer_artist in self.layers:
if layer_artist.layer is not self.data and layer_artist.layer not in self.data.subsets:
self._layer_artist_container.remove(layer_artist)
if self.data not in self._layer_artist_container:
self._layer_artist_container.append(TableLayerArtist(self.data, self))
for subset in self.data.subsets:
if subset not in self._layer_artist_container:
self._layer_artist_container.append(TableLayerArtist(subset, self))
@messagebox_on_error("Failed to add data")
def add_data(self, data):
self.data = data
self.setUpdatesEnabled(False)
self.model = DataTableModel(self)
self.ui.table.setModel(self.model)
self.setUpdatesEnabled(True)
self._sync_layers()
return True
def add_subset(self, subset):
return True
def closeEvent(self, event):
"""
On close, Qt seems to scan through the entire model
if the data set is big. To sidestep that,
we swap out with a tiny data set before closing
"""
super(TableViewer, self).closeEvent(event)
d = Data(x=[0])
self.ui.table.setModel(DataTableModel(d))
event.accept()
def restore_layers(self, rec, context):
# For now this is a bit of a hack, we assume that all subsets saved
# for this viewer are from dataset, so we just get Data object
# then just sync the layers.
for layer in rec:
c = lookup_class_with_patches(layer.pop('_type')) # noqa
props = dict((k, context.object(v)) for k, v in layer.items())
layer = props['layer']
if isinstance(layer, Data):
self.add_data(layer)
else:
self.add_data(layer.data)
break
self._sync_layers()
| {
"repo_name": "stscieisenhamer/glue",
"path": "glue/viewers/table/qt/data_viewer.py",
"copies": "1",
"size": "10792",
"license": "bsd-3-clause",
"hash": -1967949678296990500,
"line_mean": 32.9371069182,
"line_max": 99,
"alpha_frac": 0.610544848,
"autogenerated": false,
"ratio": 4.090978013646702,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5201522861646702,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import os
import numpy as np
from qtpy.QtCore import Qt
from qtpy import QtCore, QtGui, QtWidgets
from qtpy import PYQT5
from matplotlib.colors import ColorConverter
from glue.utils.qt import get_qapp
from glue.config import viewer_tool
from glue.core.layer_artist import LayerArtistBase
from glue.core import message as msg
from glue.core import Data
from glue.utils import nonpartial
from glue.utils.qt import load_ui
from glue.viewers.common.qt.data_viewer import DataViewer
from glue.viewers.common.qt.toolbar import BasicToolbar
from glue.viewers.common.qt.tool import CheckableTool
from glue.core.subset import ElementSubsetState
from glue.core.edit_subset_mode import EditSubsetMode
from glue.core.state import lookup_class_with_patches
from glue.utils.colors import alpha_blend_colors
from glue.utils.qt import mpl_to_qt4_color
from glue.core.exceptions import IncompatibleAttribute
COLOR_CONVERTER = ColorConverter()
class DataTableModel(QtCore.QAbstractTableModel):
def __init__(self, table_viewer):
super(DataTableModel, self).__init__()
if table_viewer.data.ndim != 1:
raise ValueError("Can only use Table widget for 1D data")
self._table_viewer = table_viewer
self._data = table_viewer.data
self.show_hidden = False
self.order = np.arange(self._data.shape[0])
def data_changed(self):
top_left = self.index(0, 0)
bottom_right = self.index(self.columnCount(), self.rowCount())
self.dataChanged.emit(top_left, bottom_right)
@property
def columns(self):
if self.show_hidden:
return self._data.components
else:
return self._data.visible_components
def columnCount(self, index=None):
return len(self.columns)
def rowCount(self, index=None):
# Qt bug: Crashes on tables bigger than this
return min(self._data.size, 71582788)
def headerData(self, section, orientation, role):
if role != Qt.DisplayRole:
return None
if orientation == Qt.Horizontal:
column_name = self.columns[section].label
units = self._data.get_component(self.columns[section]).units
if units != '':
column_name += "\n{0}".format(units)
return column_name
elif orientation == Qt.Vertical:
return str(self.order[section])
def data(self, index, role):
if not index.isValid():
return None
if role == Qt.DisplayRole:
c = self.columns[index.column()]
idx = self.order[index.row()]
comp = self._data.get_component(c)
if comp.categorical:
comp = comp.labels
else:
comp = comp.data
if isinstance(comp[idx], bytes):
return comp[idx].decode('ascii')
else:
return str(comp[idx])
elif role == Qt.BackgroundRole:
idx = self.order[index.row()]
# Find all subsets that this index is part of
colors = []
for layer_artist in self._table_viewer.layers[::-1]:
if layer_artist.visible:
subset = layer_artist.layer
try:
if subset.to_mask(view=slice(idx, idx + 1))[0]:
colors.append(subset.style.color)
except IncompatibleAttribute as exc:
layer_artist.disable_invalid_attributes(*exc.args)
else:
layer_artist.enabled = True
# Blend the colors using alpha blending
if len(colors) > 0:
color = alpha_blend_colors(colors, additional_alpha=0.5)
color = mpl_to_qt4_color(color)
return QtGui.QBrush(color)
def sort(self, column, ascending):
c = self.columns[column]
comp = self._data.get_component(c)
if comp.categorical:
self.order = np.argsort(comp.labels)
else:
self.order = np.argsort(comp.data)
if ascending == Qt.DescendingOrder:
self.order = self.order[::-1]
self.layoutChanged.emit()
class TableLayerArtist(LayerArtistBase):
def __init__(self, layer, table_viewer):
self._table_viewer = table_viewer
super(TableLayerArtist, self).__init__(layer)
def redraw(self):
self._table_viewer.model.data_changed()
def update(self):
pass
def clear(self):
pass
@viewer_tool
class RowSelectTool(CheckableTool):
tool_id = 'table:rowselect'
icon = 'glue_row_select'
action_text = 'Select row(s)'
tool_tip = ('Select rows by clicking on rows and pressing enter '
'once the selection is ready to be applied')
status_tip = ('CLICK to select, press ENTER to finalize selection, '
'ALT+CLICK or ALT+UP/DOWN to apply selection immediately')
def __init__(self, viewer):
super(RowSelectTool, self).__init__(viewer)
self.deactivate()
def activate(self):
self.viewer.ui.table.setSelectionMode(QtWidgets.QAbstractItemView.ExtendedSelection)
def deactivate(self):
self.viewer.ui.table.setSelectionMode(QtWidgets.QAbstractItemView.NoSelection)
self.viewer.ui.table.clearSelection()
class TableViewWithSelectionSignal(QtWidgets.QTableView):
selection_changed = QtCore.Signal()
def selectionChanged(self, *args, **kwargs):
self.selection_changed.emit()
super(TableViewWithSelectionSignal, self).selectionChanged(*args, **kwargs)
class TableWidget(DataViewer):
LABEL = "Table Viewer"
_toolbar_cls = BasicToolbar
tools = ['table:rowselect']
def __init__(self, session, parent=None, widget=None):
super(TableWidget, self).__init__(session, parent)
self.ui = load_ui('viewer_widget.ui',
directory=os.path.dirname(__file__))
self.setCentralWidget(self.ui)
hdr = self.ui.table.horizontalHeader()
hdr.setStretchLastSection(True)
if PYQT5:
hdr.setSectionResizeMode(hdr.Interactive)
else:
hdr.setResizeMode(hdr.Interactive)
hdr = self.ui.table.verticalHeader()
if PYQT5:
hdr.setSectionResizeMode(hdr.Interactive)
else:
hdr.setResizeMode(hdr.Interactive)
self.model = None
self.ui.table.selection_changed.connect(self.selection_changed)
def selection_changed(self):
app = get_qapp()
if app.queryKeyboardModifiers() == Qt.AltModifier:
self.finalize_selection(clear=False)
def keyPressEvent(self, event):
if self.toolbar.active_tool is self.toolbar.tools['table:rowselect']:
if event.key() in [Qt.Key_Enter, Qt.Key_Return]:
self.finalize_selection()
super(TableWidget, self).keyPressEvent(event)
def finalize_selection(self, clear=True):
model = self.ui.table.selectionModel()
selected_rows = [self.model.order[x.row()] for x in model.selectedRows()]
subset_state = ElementSubsetState(indices=selected_rows, data=self.data)
mode = EditSubsetMode()
mode.update(self._data, subset_state, focus_data=self.data)
if clear:
# We block the signals here to make sure that we don't update
# the subset again once the selection is cleared.
self.ui.table.blockSignals(True)
self.ui.table.clearSelection()
self.ui.table.blockSignals(False)
def register_to_hub(self, hub):
super(TableWidget, self).register_to_hub(hub)
def dfilter(x):
return x.sender.data is self.data
hub.subscribe(self, msg.SubsetCreateMessage,
handler=nonpartial(self._refresh),
filter=dfilter)
hub.subscribe(self, msg.SubsetUpdateMessage,
handler=nonpartial(self._refresh),
filter=dfilter)
hub.subscribe(self, msg.SubsetDeleteMessage,
handler=nonpartial(self._refresh),
filter=dfilter)
hub.subscribe(self, msg.DataUpdateMessage,
handler=nonpartial(self._refresh),
filter=dfilter)
def _refresh(self):
self._sync_layers()
self.model.data_changed()
def _sync_layers(self):
# For now we don't show the data in the list because it always has to
# be shown
for layer_artist in self.layers:
if layer_artist.layer not in self.data.subsets:
self._layer_artist_container.remove(layer_artist)
for subset in self.data.subsets:
if subset not in self._layer_artist_container:
self._layer_artist_container.append(TableLayerArtist(subset, self))
def add_data(self, data):
self.data = data
self.setUpdatesEnabled(False)
self.model = DataTableModel(self)
self.ui.table.setModel(self.model)
self.setUpdatesEnabled(True)
self._sync_layers()
return True
def add_subset(self, subset):
return True
def unregister(self, hub):
pass
def closeEvent(self, event):
"""
On close, Qt seems to scan through the entire model
if the data set is big. To sidestep that,
we swap out with a tiny data set before closing
"""
d = Data(x=[0])
self.ui.table.setModel(DataTableModel(d))
event.accept()
def restore_layers(self, rec, context):
# For now this is a bit of a hack, we assume that all subsets saved
# for this viewer are from dataset, so we just get Data object
# then just sync the layers.
for layer in rec:
c = lookup_class_with_patches(layer.pop('_type'))
props = dict((k, context.object(v)) for k, v in layer.items())
layer = props['layer']
self.add_data(layer.data)
break
self._sync_layers()
| {
"repo_name": "saimn/glue",
"path": "glue/viewers/table/qt/viewer_widget.py",
"copies": "1",
"size": "10223",
"license": "bsd-3-clause",
"hash": 3724234378972284400,
"line_mean": 32.0841423948,
"line_max": 92,
"alpha_frac": 0.6126381688,
"autogenerated": false,
"ratio": 4.071286340103544,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5183924508903545,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import os
import numpy
import json
import sys
import re
import scipy.signal
import logging
import ast
import inspect
import collections
import numbers
try:
import cPickle as pickle
except:
import pickle
from collections import namedtuple, OrderedDict
import time
import mxnet as mx
import mxnet.ndarray as nd
_ctx = mx.cpu()
_numpy_rng = numpy.random.RandomState(123456)
def get_default_ctx():
return _ctx
def get_numpy_rng():
return _numpy_rng
def get_saving_path(prefix="", epoch=None):
sym_saving_path = os.path.join('%s-symbol.json' % prefix)
if epoch is not None:
param_saving_path = os.path.join('%s-%05d.params' % (prefix, epoch))
else:
param_saving_path = os.path.join('%s.params' % prefix)
misc_saving_path = os.path.join('%s-misc.json' % prefix)
return sym_saving_path, param_saving_path, misc_saving_path
def logging_config(name=None, level=logging.DEBUG, console_level=logging.DEBUG):
if name is None:
name = inspect.stack()[1][1].split('.')[0]
folder = os.path.join(os.getcwd(), name)
if not os.path.exists(folder):
os.makedirs(folder)
logpath = os.path.join(folder, name + ".log")
print("All Logs will be saved to %s" %logpath)
logging.root.setLevel(level)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logfile = logging.FileHandler(logpath)
logfile.setLevel(level)
logfile.setFormatter(formatter)
logging.root.addHandler(logfile)
#TODO Update logging patterns in other files
logconsole = logging.StreamHandler()
logconsole.setLevel(console_level)
logconsole.setFormatter(formatter)
logging.root.addHandler(logconsole)
return folder
def save_params(dir_path=os.curdir, epoch=None, name="", params=None, aux_states=None,
ctx=mx.cpu()):
prefix = os.path.join(dir_path, name)
_, param_saving_path, _ = get_saving_path(prefix, epoch)
if not os.path.isdir(dir_path) and not (dir_path == ""):
os.makedirs(dir_path)
save_dict = {('arg:%s' % k): v.copyto(ctx) for k, v in params.items()}
save_dict.update({('aux:%s' % k): v.copyto(ctx) for k, v in aux_states.items()})
nd.save(param_saving_path, save_dict)
return param_saving_path
def save_misc(dir_path=os.curdir, epoch=None, name="", content=None):
prefix = os.path.join(dir_path, name)
_, _, misc_saving_path = get_saving_path(prefix, epoch)
with open(misc_saving_path, 'w') as fp:
json.dump(content, fp)
return misc_saving_path
def quick_save_json(dir_path=os.curdir, file_name="", content=None):
file_path = os.path.join(dir_path, file_name)
if not os.path.isdir(dir_path):
os.makedirs(dir_path)
with open(file_path, 'w') as fp:
json.dump(content, fp)
logging.info('Save json into %s' % file_path)
def safe_eval(expr):
if type(expr) is str:
return ast.literal_eval(expr)
else:
return expr
def norm_clipping(params_grad, threshold):
assert isinstance(params_grad, dict)
norm_val = numpy.sqrt(sum([nd.norm(grad).asnumpy()[0]**2 for grad in params_grad.values()]))
# print('grad norm: %g' % norm_val)
ratio = 1.0
if norm_val > threshold:
ratio = threshold / norm_val
for grad in params_grad.values():
grad *= ratio
return norm_val
def sample_categorical(prob, rng):
"""Sample from independent categorical distributions
Each batch is an independent categorical distribution.
Parameters
----------
prob : numpy.ndarray
Probability of the categorical distribution. Shape --> (batch_num, category_num)
rng : numpy.random.RandomState
Returns
-------
ret : numpy.ndarray
Sampling result. Shape --> (batch_num,)
"""
ret = numpy.empty(prob.shape[0], dtype=numpy.float32)
for ind in range(prob.shape[0]):
        ret[ind] = numpy.searchsorted(numpy.cumsum(prob[ind]),
                                      rng.rand()).clip(min=0.0, max=prob.shape[1] - 0.5)
return ret
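# Note: this is inverse-CDF sampling -- a uniform draw in [0, 1) is located in
# the cumulative distribution via searchsorted, and the resulting index is
# clipped to the valid category range.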
def sample_normal(mean, var, rng):
"""Sample from independent normal distributions
Each element is an independent normal distribution.
Parameters
----------
mean : numpy.ndarray
Means of the normal distribution. Shape --> (batch_num, sample_dim)
var : numpy.ndarray
Variance of the normal distribution. Shape --> (batch_num, sample_dim)
rng : numpy.random.RandomState
Returns
-------
ret : numpy.ndarray
The sampling result. Shape --> (batch_num, sample_dim)
"""
ret = numpy.sqrt(var) * rng.randn(*mean.shape) + mean
return ret
def sample_mog(prob, mean, var, rng):
"""Sample from independent mixture of gaussian (MoG) distributions
Each batch is an independent MoG distribution.
Parameters
----------
prob : numpy.ndarray
mixture probability of each gaussian. Shape --> (batch_num, center_num)
mean : numpy.ndarray
mean of each gaussian. Shape --> (batch_num, center_num, sample_dim)
var : numpy.ndarray
variance of each gaussian. Shape --> (batch_num, center_num, sample_dim)
rng : numpy.random.RandomState
Returns
-------
ret : numpy.ndarray
sampling result. Shape --> (batch_num, sample_dim)
"""
gaussian_inds = sample_categorical(prob, rng).astype(numpy.int32)
mean = mean[numpy.arange(mean.shape[0]), gaussian_inds, :]
var = var[numpy.arange(mean.shape[0]), gaussian_inds, :]
ret = sample_normal(mean=mean, var=var, rng=rng)
return ret
def npy_softmax(x, axis=1):
e_x = numpy.exp(x - numpy.max(x, axis=axis, keepdims=True))
out = e_x / e_x.sum(axis=axis, keepdims=True)
return out
def npy_sigmoid(x):
return 1/(1 + numpy.exp(-x))
def npy_onehot(x, num):
ret = numpy.zeros(shape=(x.size, num))
ret[numpy.arange(x.size), x.ravel()] = 1
ret = ret.reshape(x.shape + (num,))
return ret
def npy_binary_entropy(prediction, target):
assert prediction.shape == target.shape
return - (numpy.log(prediction + 1E-9) * target +
numpy.log(1 - prediction + 1E-9) * (1 - target)).sum()
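# Note: npy_binary_entropy returns the *summed* binary cross-entropy,
# -sum(target * log(p + 1e-9) + (1 - target) * log(1 - p + 1e-9)),
# where the 1e-9 terms guard against log(0).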
def block_all(sym_list):
return [mx.symbol.BlockGrad(sym) for sym in sym_list]
def load_params(dir_path="", epoch=None, name=""):
prefix = os.path.join(dir_path, name)
_, param_loading_path, _ = get_saving_path(prefix, epoch)
while not os.path.isfile(param_loading_path):
logging.info("in load_param, %s Not Found!" % param_loading_path)
time.sleep(60)
save_dict = nd.load(param_loading_path)
arg_params = {}
aux_params = {}
for k, v in save_dict.items():
tp, name = k.split(':', 1)
if tp == 'arg':
arg_params[name] = v
if tp == 'aux':
aux_params[name] = v
return arg_params, aux_params, param_loading_path
def load_misc(dir_path="", epoch=None, name=""):
prefix = os.path.join(dir_path, name)
_, _, misc_saving_path = get_saving_path(prefix, epoch)
with open(misc_saving_path, 'r') as fp:
misc = json.load(fp)
return misc
def load_npz(path):
with numpy.load(path) as data:
ret = {k: data[k] for k in data.keys()}
return ret
def discount_cumsum(x, discount):
# See https://docs.scipy.org/doc/scipy/reference/tutorial/signal.html#difference-equation-filtering
# Here, we have y[t] - discount*y[t+1] = x[t]
# or rev(y)[t] - discount*rev(y)[t-1] = rev(x)[t]
return scipy.signal.lfilter([1], [1, -discount], x[::-1], axis=0)[::-1]
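# Illustrative example (not part of the original code): with discount = 0.9,
# discount_cumsum([1.0, 1.0, 1.0], 0.9) is approximately [2.71, 1.9, 1.0],
# since y[t] = x[t] + 0.9 * y[t + 1] with y[T] = x[T].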
def discount_return(x, discount):
return numpy.sum(x * (discount ** numpy.arange(len(x))))
def update_on_kvstore(kv, params, params_grad):
for ind, k in enumerate(params.keys()):
kv.push(ind, params_grad[k], priority=-ind)
kv.pull(ind, params[k], priority=-ind)
def parse_ctx(ctx_args):
    ctx = re.findall(r'([a-z]+)(\d*)', ctx_args)
ctx = [(device, int(num)) if len(num) > 0 else (device, 0) for device, num in ctx]
return ctx
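# Illustrative example (not part of the original code): parse_ctx("gpu0,gpu1")
# returns [('gpu', 0), ('gpu', 1)], and parse_ctx("cpu") returns [('cpu', 0)]
# because a missing device number defaults to 0.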
def get_npy_list(ndarray_list):
"""Get a numpy-array list from a ndarray list
Parameters
----------
ndarray_list : list of NDArray
Returns
-------
ret : list of numpy.ndarray
"""
ret = [v.asnumpy() for v in ndarray_list]
return ret
def get_sym_list(syms, default_names=None, default_shapes=None):
if syms is None and default_names is not None:
if default_shapes is not None:
return [mx.sym.Variable(name=name, shape=shape) for (name, shape)
in zip(default_names, default_shapes)]
else:
return [mx.sym.Variable(name=name) for name in default_names]
assert isinstance(syms, (list, tuple, mx.symbol.Symbol))
if isinstance(syms, (list, tuple)):
if default_names is not None and len(syms) != len(default_names):
raise ValueError("Size of symbols do not match expectation. Received %d, Expected %d. "
"syms=%s, names=%s" %(len(syms), len(default_names),
str(list(sym.name for sym in syms)),
str(default_names)))
return list(syms)
else:
if default_names is not None and len(default_names) != 1:
raise ValueError("Size of symbols do not match expectation. Received 1, Expected %d. "
"syms=%s, names=%s"
% (len(default_names), str([syms.name]), str(default_names)))
return [syms]
def get_numeric_list(values, typ, expected_len=None):
if isinstance(values, numbers.Number):
if expected_len is not None:
return [typ(values)] * expected_len
else:
return [typ(values)]
elif isinstance(values, (list, tuple)):
if expected_len is not None:
assert len(values) == expected_len
try:
ret = [typ(value) for value in values]
return ret
        except ValueError:
print("Need iterable with numeric elements, received: %s" %str(values))
sys.exit(1)
else:
raise ValueError("Unaccepted value type, values=%s" %str(values))
def get_int_list(values, expected_len=None):
return get_numeric_list(values, numpy.int32, expected_len)
def get_float_list(values, expected_len=None):
return get_numeric_list(values, numpy.float32, expected_len)
def get_bucket_key(bucket_kwargs):
assert isinstance(bucket_kwargs, dict)
return tuple(bucket_kwargs.items())
| {
"repo_name": "danithaca/mxnet",
"path": "example/reinforcement-learning/dqn/utils.py",
"copies": "16",
"size": "10786",
"license": "apache-2.0",
"hash": -5272291885398858000,
"line_mean": 31.1011904762,
"line_max": 103,
"alpha_frac": 0.6153346931,
"autogenerated": false,
"ratio": 3.5433639947437583,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0013484609149240576,
"num_lines": 336
} |
from __future__ import absolute_import, division, print_function
import os
import os.path
import re
from subprocess import Popen, PIPE
from changes.constants import PROJECT_ROOT
from changes.db.utils import create_or_update, get_or_create
from changes.models import Author, Revision
class CommandError(Exception):
def __init__(self, cmd, retcode, stdout=None, stderr=None):
self.cmd = cmd
self.retcode = retcode
self.stdout = stdout
self.stderr = stderr
def __unicode__(self):
return '%s returned %d:\nSTDOUT: %r\nSTDERR: %r' % (
self.cmd, self.retcode, self.stdout, self.stderr)
def __str__(self):
return self.__unicode__().encode('utf-8')
class BufferParser(object):
def __init__(self, fp, delim):
self.fp = fp
self.delim = delim
def __iter__(self):
chunk_buffer = []
for chunk in self.fp:
while chunk.find(self.delim) != -1:
d_pos = chunk.find(self.delim)
chunk_buffer.append(chunk[:d_pos])
yield ''.join(chunk_buffer)
chunk_buffer = []
chunk = chunk[d_pos + 1:]
if chunk:
chunk_buffer.append(chunk)
if chunk_buffer:
yield ''.join(chunk_buffer)
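# Note: BufferParser lazily re-chunks a stream on a delimiter, buffering partial
# pieces and yielding one complete record per delimiter plus any trailing
# remainder. For instance (illustrative), iterating over the chunks
# ['ab\x00c', 'd\x00'] with delim '\x00' yields 'ab' and then 'cd'.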
class Vcs(object):
ssh_connect_path = os.path.join(PROJECT_ROOT, 'bin', 'ssh-connect')
def __init__(self, path, url, username=None):
self.path = path
self.url = url
self.username = username
self._path_exists = None
def get_default_env(self):
return {}
def run(self, *args, **kwargs):
if self.exists():
kwargs.setdefault('cwd', self.path)
env = os.environ.copy()
for key, value in self.get_default_env().iteritems():
env.setdefault(key, value)
env.setdefault('CHANGES_SSH_REPO', self.url)
        for key, value in kwargs.pop('env', {}).iteritems():
env[key] = value
kwargs['env'] = env
kwargs['stdout'] = PIPE
kwargs['stderr'] = PIPE
proc = Popen(*args, **kwargs)
(stdout, stderr) = proc.communicate()
if proc.returncode != 0:
raise CommandError(args[0], proc.returncode, stdout, stderr)
return stdout
def exists(self):
return os.path.exists(self.path)
def clone(self):
raise NotImplementedError
def update(self):
raise NotImplementedError
def log(self, parent=None, offset=0, limit=100):
raise NotImplementedError
def export(self, id):
raise NotImplementedError
def get_revision(self, id):
"""
        Return a ``Revision`` given by ``id``.
"""
return self.log(parent=id, limit=1).next()
def get_default_revision(self):
raise NotImplementedError
def is_child_parent(self, child_in_question, parent_in_question):
raise NotImplementedError
class RevisionResult(object):
parents = None
branches = None
def __init__(self, id, message, author, author_date, committer=None,
committer_date=None, parents=None, branches=None):
self.id = id
self.message = message
self.author = author
self.author_date = author_date
self.committer = committer or author
self.committer_date = committer_date or author_date
if parents is not None:
self.parents = parents
if branches is not None:
self.branches = branches
def __repr__(self):
return '<%s: id=%r author=%r subject=%r>' % (
type(self).__name__, self.id, self.author, self.subject)
def _get_author(self, value):
match = re.match(r'^(.+) <([^>]+)>$', value)
if not match:
if '@' in value:
name, email = value, value
else:
name, email = value, '{0}@localhost'.format(value)
else:
name, email = match.group(1), match.group(2)
author, _ = get_or_create(Author, where={
'email': email,
}, defaults={
'name': name,
})
return author
@property
def subject(self):
return self.message.splitlines()[0]
def save(self, repository):
author = self._get_author(self.author)
if self.author == self.committer:
committer = author
else:
committer = self._get_author(self.committer)
revision, created = create_or_update(Revision, where={
'repository': repository,
'sha': self.id,
}, values={
'author': author,
'committer': committer,
'message': self.message,
'parents': self.parents,
'branches': self.branches,
'date_created': self.author_date,
'date_committed': self.committer_date,
})
return (revision, created)
| {
"repo_name": "alex/changes",
"path": "changes/vcs/base.py",
"copies": "1",
"size": "4965",
"license": "apache-2.0",
"hash": -4475856234694951400,
"line_mean": 26.4309392265,
"line_max": 72,
"alpha_frac": 0.5597180262,
"autogenerated": false,
"ratio": 4.0763546798029555,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 181
} |
from __future__ import absolute_import, division, print_function
import os
import os.path
import re
import shutil
import tempfile
from subprocess import Popen, PIPE, check_call, CalledProcessError
from typing import Any, List, Optional, Set, Union # NOQA
from changes.constants import PROJECT_ROOT
from changes.db.utils import create_or_update, get_or_create, try_create
from changes.models.author import Author
from changes.models.revision import Revision
from changes.models.source import Source
from changes.config import statsreporter
from changes.utils.diff_parser import DiffParser
from time import time
class CommandError(Exception):
def __init__(self, cmd, retcode, stdout=None, stderr=None):
self.cmd = cmd
self.retcode = retcode
self.stdout = stdout
self.stderr = stderr
def __unicode__(self):
return '%s returned %d:\nSTDOUT: %r\nSTDERR: %r' % (
self.cmd, self.retcode, self.stdout, self.stderr)
def __str__(self):
return self.__unicode__().encode('utf-8')
class UnknownRevision(CommandError):
"""Indicates that an operation was attempted on a
revision that doesn't appear to exist."""
pass
class UnknownChildRevision(UnknownRevision):
"""Indicates that VCS was queried for a parent-child relationship with a
child revision that doesn't appear to exist."""
pass
class UnknownParentRevision(UnknownRevision):
"""Indicates that VCS was queried for a parent-child relationship with a
parent revision that doesn't appear to exist."""
pass
class ConcurrentUpdateError(CommandError):
"""Indicates that a command failed because a vcs update is running."""
pass
class InvalidDiffError(Exception):
"""This is used when a diff is invalid and fails to apply. It is NOT
a subclass of CommandError, as it is not a vcs command"""
pass
class ContentReadError(Exception):
"""Indicates that an attempt to read the contents of a file in the repo failed.
"""
pass
class MissingFileError(ContentReadError):
"""Indicates that an attempt to read the contents of a file in the repo failed
because the file doesn't appear to exist.
"""
pass
class BufferParser(object):
def __init__(self, fp, delim):
self.fp = fp
self.delim = delim
def __iter__(self):
chunk_buffer = []
for chunk in self.fp:
while chunk.find(self.delim) != -1:
d_pos = chunk.find(self.delim)
chunk_buffer.append(chunk[:d_pos])
yield ''.join(chunk_buffer)
chunk_buffer = []
chunk = chunk[d_pos + 1:]
if chunk:
chunk_buffer.append(chunk)
if chunk_buffer:
yield ''.join(chunk_buffer)
class Vcs(object):
ssh_connect_path = os.path.join(PROJECT_ROOT, 'bin', 'ssh-connect')
def __init__(self, path, url, username=None):
self.path = path
self.url = url
self.username = username
self._path_exists = None
def get_default_env(self):
return {}
def run(self, *args, **kwargs):
# type: (*Any, **Any) -> str
kwargs.setdefault('cwd', self.path)
input = kwargs.pop('input', None)
proc = self._construct_subprocess(*args, **kwargs)
return self._execute_subproccess(proc, *args, input=input)
def _construct_subprocess(self, *args, **kwargs):
# type: (*Any, **Any) -> Popen
"""Construct a subprocess with the correct arguments and environment"""
env = os.environ.copy()
for key, value in self.get_default_env().iteritems():
env.setdefault(key, value)
env.setdefault('CHANGES_SSH_REPO', self.url)
        for key, value in kwargs.pop('env', {}).iteritems():
env[key] = value
kwargs['env'] = env
kwargs['close_fds'] = True
kwargs.setdefault('stdout', PIPE)
kwargs.setdefault('stderr', PIPE)
kwargs.setdefault('stdin', PIPE)
proc = Popen(*args, **kwargs)
return proc
def _execute_subproccess(self, proc, *args, **kwargs):
# type: (Popen, *Any, **Any) -> str
"""Execute subproccess and handle errors"""
(stdout, stderr) = proc.communicate(**kwargs)
if proc.returncode != 0:
raise CommandError(args[0], proc.returncode, stdout, stderr)
return stdout
@classmethod
def get_repository_name(cls, repository_url):
"""
Given a repository URL, get the repository name.
Examples:
example.com:test.git -> test.git
example.com:test -> test
example.com:prefix/test.git -> test.git
"""
# type: (str) -> str
path = repository_url.split(':')[-1].strip('/')
return os.path.basename(path)
@staticmethod
def get_clone_command(remote_url, path, revision, clean=True, cache_dir=None):
# type: (str, str, str, bool, Optional[str]) -> str
raise NotImplementedError
def exists(self):
return os.path.exists(self.path)
def clone(self):
raise NotImplementedError
def update(self):
raise NotImplementedError
def log(self, parent=None, branch=None, author=None, offset=0, limit=100):
""" Gets the commit log for the repository.
Only one of parent or branch can be specified for restricting searches.
If parent is set, it is used to identify any ancestor revisions,
regardless of their branch.
If branch is set, all revisions in the branch AND any ancestor commits
are returned.
For any revisions returned, the list of associated branches returned is
tool specific and may or may not include ancestor branch names. See tool
implementations for exact behavior of this function.
:param parent: Parent at which revision search begins.
:param branch: Branch name the revision must be associated with.
:param author: The author name or email to filter results.
:param offset: An offset into the results at which to begin.
:param limit: The maximum number of results to return.
:return: A list of revisions matching the given criteria.
"""
raise NotImplementedError
def export(self, id):
"""Get the textual diff for a revision.
Args:
id (str): The id of the revision.
Returns:
A string with the text of the diff for the revision.
Raises:
UnknownRevision: If the revision wasn't found.
"""
raise NotImplementedError
def get_changed_files(self, id):
# type: (str) -> Set[str]
"""Returns the list of files changed in a revision.
Args:
id (str): The id of the revision.
Returns:
A set of filenames
Raises:
UnknownRevision: If the revision wan't found.
"""
diff = self.export(id)
diff_parser = DiffParser(diff)
return diff_parser.get_changed_files()
def get_default_revision(self):
raise NotImplementedError
def is_child_parent(self, child_in_question, parent_in_question):
raise NotImplementedError
def get_known_branches(self):
""" This is limited to parallel trees with names.
:return: A list of unique names for the branches.
"""
raise NotImplementedError
# XXX(dcramer): not overly happy with the buildstep commands API
def get_buildstep_clone(self, source, workspace, clean=True, cache_dir="/dev/null"):
raise NotImplementedError
def get_buildstep_patch(self, source, workspace):
raise NotImplementedError
def get_buildstep_checkout_revision(self, revision_sha):
# type: (str) -> str
raise NotImplementedError
def get_buildstep_checkout_parent_revision(self, revision_sha):
# type: (str) -> str
raise NotImplementedError
def get_buildstep_changed_files(self, revision_sha):
# type: (str) -> str
raise NotImplementedError
def log_timing(self, command, start_time):
repo_type = 'unknown'
classname = self.__class__.__name__
if "Git" in classname:
repo_type = 'git'
elif "Mercurial" in classname:
repo_type = 'hg'
timer_name = "changes_vcs_perf_{}_command_{}".format(
repo_type, command)
time_taken = time() - start_time
statsreporter.stats().log_timing(timer_name, time_taken * 1000)
def read_file(self, sha, file_path, diff=None):
"""Read the content of a file at a given revision.
Args:
sha (str): the sha identifying the revision
file_path (str): the path to the file from the root of the repo
diff (str): the optional patch to apply before reading the config
Returns:
str - the content of the file
Raises:
CommandError - if the file or the revision cannot be found
"""
raise NotImplementedError
def get_patch_hash(self, rev_sha):
# type: (str) -> Union[str, None]
"""Return the patch id for a given revision if git, else return None"""
raise NotImplementedError
def _selectively_apply_diff(self, file_path, file_content, diff):
"""A helper function that takes a diff, extract the parts of the diff
relating to `file_path`, and apply it to `file_content`.
If the diff does not involve `file_path`, then `file_content` is
returned, untouched.
Args:
file_path (str) - the path of the file to look for in the diff
file_content (str) - the content of the file to base on
diff (str) - diff in unidiff format
Returns:
str - `file_content` with the diff applied on top of it
Raises:
InvalidDiffError - when the supplied diff is invalid.
"""
parser = DiffParser(diff)
selected_diff = None
for file_info in parser.parse():
if file_info.new_filename is not None and file_info.new_filename[2:] == file_path:
selected_diff = parser.reconstruct_file_diff(file_info)
if selected_diff is None:
return file_content
temp_patch_file_path = None
temp_dir = None
try:
# create a temporary file to house the patch
fd, temp_patch_file_path = tempfile.mkstemp()
os.write(fd, selected_diff)
os.close(fd)
# create a temporary folder where we will mimic the structure of
# the repo, with only the config inside it
dir_name, _ = os.path.split(file_path)
temp_dir = tempfile.mkdtemp()
if len(dir_name) > 0:
os.makedirs(os.path.join(temp_dir, dir_name))
temp_file_path = os.path.join(temp_dir, file_path)
with open(temp_file_path, 'w') as f:
f.write(file_content)
# apply the patch
try:
check_call([
'patch',
'--strip=1',
'--unified',
'--directory={}'.format(temp_dir),
'--input={}'.format(temp_patch_file_path),
])
except CalledProcessError:
raise InvalidDiffError
with open(temp_file_path, 'r') as f:
patched_content = f.read()
finally:
# clean up
if temp_patch_file_path and os.path.exists(temp_patch_file_path):
os.remove(temp_patch_file_path)
if temp_dir and os.path.exists(temp_dir):
shutil.rmtree(temp_dir)
return patched_content
class RevisionResult(object):
parents = None # type: List[str]
branches = None # type: List[str]
def __init__(self, id, message, author, author_date, committer=None,
committer_date=None, parents=None, branches=None):
self.id = id
self.message = message
self.author = author
self.author_date = author_date
self.committer = committer or author
self.committer_date = committer_date or author_date
if parents is not None:
self.parents = parents
if branches is not None:
self.branches = branches
def __repr__(self):
return '<%s: id=%r author=%r subject=%r>' % (
type(self).__name__, self.id, self.author, self.subject)
def _get_author(self, value):
match = re.match(r'^(.+) <([^>]+)>$', value)
if not match:
if '@' in value:
name, email = value, value
else:
name, email = value, '{0}@localhost'.format(value)
else:
name, email = match.group(1), match.group(2)
author, _ = get_or_create(Author, where={
'email': email,
}, defaults={
'name': name,
})
return author
@property
def subject(self):
return self.message.splitlines()[0]
def save(self, repository):
author = self._get_author(self.author)
if self.author == self.committer:
committer = author
else:
committer = self._get_author(self.committer)
revision, created = create_or_update(Revision, where={
'repository': repository,
'sha': self.id,
}, values={
'author': author,
'committer': committer,
'message': self.message,
'parents': self.parents,
'branches': self.branches,
'date_created': self.author_date,
'date_committed': self.committer_date,
})
# This call is relatively expensive - only do if necessary.
if created:
vcs = repository.get_vcs()
if vcs:
revision.patch_hash = vcs.get_patch_hash(self.id)
# we also want to create a source for this item as it's the canonical
# representation in the UI
source = try_create(Source, {
'revision_sha': self.id,
'repository': repository,
})
return (revision, created, source)
| {
"repo_name": "dropbox/changes",
"path": "changes/vcs/base.py",
"copies": "1",
"size": "14387",
"license": "apache-2.0",
"hash": -1313017522074860800,
"line_mean": 31.6977272727,
"line_max": 94,
"alpha_frac": 0.5936609439,
"autogenerated": false,
"ratio": 4.264078245406046,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0001593388575112039,
"num_lines": 440
} |
from __future__ import absolute_import, division, print_function
import os
import os.path
import re
import shutil
import tempfile
from subprocess import Popen, PIPE, check_call, CalledProcessError
from changes.constants import PROJECT_ROOT
from changes.db.utils import create_or_update, get_or_create, try_create
from changes.models import Author, Revision, Source
from changes.config import statsreporter
from changes.utils.diff_parser import DiffParser
from time import time
class CommandError(Exception):
def __init__(self, cmd, retcode, stdout=None, stderr=None):
self.cmd = cmd
self.retcode = retcode
self.stdout = stdout
self.stderr = stderr
def __unicode__(self):
return '%s returned %d:\nSTDOUT: %r\nSTDERR: %r' % (
self.cmd, self.retcode, self.stdout, self.stderr)
def __str__(self):
return self.__unicode__().encode('utf-8')
class UnknownRevision(CommandError):
"""Indicates that an operation was attempted on a
revision that doesn't appear to exist."""
pass
class InvalidDiffError(Exception):
"""This is used when a diff is invalid and fails to apply. It is NOT
a subclass of CommandError, as it is not a vcs command"""
pass
class BufferParser(object):
def __init__(self, fp, delim):
self.fp = fp
self.delim = delim
def __iter__(self):
chunk_buffer = []
for chunk in self.fp:
while chunk.find(self.delim) != -1:
d_pos = chunk.find(self.delim)
chunk_buffer.append(chunk[:d_pos])
yield ''.join(chunk_buffer)
chunk_buffer = []
chunk = chunk[d_pos + 1:]
if chunk:
chunk_buffer.append(chunk)
if chunk_buffer:
yield ''.join(chunk_buffer)
class Vcs(object):
ssh_connect_path = os.path.join(PROJECT_ROOT, 'bin', 'ssh-connect')
def __init__(self, path, url, username=None):
self.path = path
self.url = url
self.username = username
self._path_exists = None
def get_default_env(self):
return {}
def run(self, *args, **kwargs):
if self.exists():
kwargs.setdefault('cwd', self.path)
env = os.environ.copy()
for key, value in self.get_default_env().iteritems():
env.setdefault(key, value)
env.setdefault('CHANGES_SSH_REPO', self.url)
            for key, value in kwargs.pop('env', {}).iteritems():
env[key] = value
kwargs['env'] = env
kwargs['stdout'] = PIPE
kwargs['stderr'] = PIPE
proc = Popen(*args, **kwargs)
(stdout, stderr) = proc.communicate()
if proc.returncode != 0:
raise CommandError(args[0], proc.returncode, stdout, stderr)
return stdout
def exists(self):
return os.path.exists(self.path)
def clone(self):
raise NotImplementedError
def update(self):
raise NotImplementedError
def log(self, parent=None, branch=None, author=None, offset=0, limit=100):
""" Gets the commit log for the repository.
Only one of parent or branch can be specified for restricting searches.
If parent is set, it is used to identify any ancestor revisions,
regardless of their branch.
If branch is set, all revisions in the branch AND any ancestor commits
are returned.
For any revisions returned, the list of associated branches returned is
tool specific and may or may not include ancestor branch names. See tool
implementations for exact behavior of this function.
:param parent: Parent at which revision search begins.
:param branch: Branch name the revision must be associated with.
:param author: The author name or email to filter results.
:param offset: An offset into the results at which to begin.
:param limit: The maximum number of results to return.
:return: A list of revisions matching the given criteria.
"""
raise NotImplementedError
def export(self, id):
"""Get the textual diff for a revision.
Args:
id (str): The id of the revision.
Returns:
A string with the text of the diff for the revision.
Raises:
UnknownRevision: If the revision wasn't found.
"""
raise NotImplementedError
def get_default_revision(self):
raise NotImplementedError
def is_child_parent(self, child_in_question, parent_in_question):
raise NotImplementedError
def get_known_branches(self):
""" This is limited to parallel trees with names.
:return: A list of unique names for the branches.
"""
raise NotImplementedError
# XXX(dcramer): not overly happy with the buildstep commands API
def get_buildstep_clone(self, source, workspace):
raise NotImplementedError
def get_buildstep_patch(self, source, workspace):
raise NotImplementedError
def log_timing(self, command, start_time):
repo_type = 'unknown'
classname = self.__class__.__name__
if "Git" in classname:
repo_type = 'git'
elif "Mercurial" in classname:
repo_type = 'hg'
timer_name = "changes_vcs_perf_{}_command_{}".format(
repo_type, command)
time_taken = time() - start_time
statsreporter.stats().log_timing(timer_name, time_taken * 1000)
def read_file(self, sha, file_path, diff=None):
"""Read the content of a file at a given revision.
Args:
sha (str): the sha identifying the revision
file_path (str): the path to the file from the root of the repo
diff (str): the optional patch to apply before reading the config
Returns:
str - the content of the file
Raises:
CommandError - if the file or the revision cannot be found
"""
raise NotImplementedError
def _selectively_apply_diff(self, file_path, file_content, diff):
"""A helper function that takes a diff, extract the parts of the diff
relating to `file_path`, and apply it to `file_content`.
If the diff does not involve `file_path`, then `file_content` is
returned, untouched.
Args:
file_path (str) - the path of the file to look for in the diff
file_content (str) - the content of the file to base on
diff (str) - diff in unidiff format
Returns:
str - `file_content` with the diff applied on top of it
Raises:
InvalidDiffError - when the supplied diff is invalid.
"""
parser = DiffParser(diff)
selected_diff = None
for file_dict in parser.parse():
if file_dict['new_filename'] is not None and file_dict['new_filename'][2:] == file_path:
selected_diff = parser.reconstruct_file_diff(file_dict)
if selected_diff is None:
return file_content
temp_patch_file_path = None
temp_dir = None
try:
# create a temporary file to house the patch
fd, temp_patch_file_path = tempfile.mkstemp()
os.write(fd, selected_diff)
os.close(fd)
# create a temporary folder where we will mimic the structure of
# the repo, with only the config inside it
dir_name, _ = os.path.split(file_path)
temp_dir = tempfile.mkdtemp()
if len(dir_name) > 0:
os.makedirs(os.path.join(temp_dir, dir_name))
temp_file_path = os.path.join(temp_dir, file_path)
with open(temp_file_path, 'w') as f:
f.write(file_content)
# apply the patch
try:
check_call([
'patch',
'--strip=1',
'--unified',
'--directory={}'.format(temp_dir),
'--input={}'.format(temp_patch_file_path),
])
except CalledProcessError:
raise InvalidDiffError
with open(temp_file_path, 'r') as f:
patched_content = f.read()
finally:
# clean up
if temp_patch_file_path and os.path.exists(temp_patch_file_path):
os.remove(temp_patch_file_path)
if temp_dir and os.path.exists(temp_dir):
shutil.rmtree(temp_dir)
return patched_content
class RevisionResult(object):
parents = None
branches = None
def __init__(self, id, message, author, author_date, committer=None,
committer_date=None, parents=None, branches=None):
self.id = id
self.message = message
self.author = author
self.author_date = author_date
self.committer = committer or author
self.committer_date = committer_date or author_date
if parents is not None:
self.parents = parents
if branches is not None:
self.branches = branches
def __repr__(self):
return '<%s: id=%r author=%r subject=%r>' % (
type(self).__name__, self.id, self.author, self.subject)
def _get_author(self, value):
match = re.match(r'^(.+) <([^>]+)>$', value)
if not match:
if '@' in value:
name, email = value, value
else:
name, email = value, '{0}@localhost'.format(value)
else:
name, email = match.group(1), match.group(2)
author, _ = get_or_create(Author, where={
'email': email,
}, defaults={
'name': name,
})
return author
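    # Hedged examples of the author strings handled above:
    #
    #   'Jane Doe <jane@example.com>'  -> name='Jane Doe', email='jane@example.com'
    #   'jane@example.com'             -> name and email both 'jane@example.com'
    #   'janedoe'                      -> name='janedoe', email='janedoe@localhost'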
@property
def subject(self):
return self.message.splitlines()[0]
def save(self, repository):
author = self._get_author(self.author)
if self.author == self.committer:
committer = author
else:
committer = self._get_author(self.committer)
revision, created = create_or_update(Revision, where={
'repository': repository,
'sha': self.id,
}, values={
'author': author,
'committer': committer,
'message': self.message,
'parents': self.parents,
'branches': self.branches,
'date_created': self.author_date,
'date_committed': self.committer_date,
})
# we also want to create a source for this item as it's the canonical
# representation in the UI
source = try_create(Source, {
'revision_sha': self.id,
'repository': repository,
})
return (revision, created, source)
| {
"repo_name": "bowlofstew/changes",
"path": "changes/vcs/base.py",
"copies": "2",
"size": "10849",
"license": "apache-2.0",
"hash": -1072897624959513100,
"line_mean": 31.4820359281,
"line_max": 100,
"alpha_frac": 0.5811595539,
"autogenerated": false,
"ratio": 4.300039635354737,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0000666067937465736,
"num_lines": 334
} |
from __future__ import (absolute_import, division, print_function)
import os
import os.path
import unittest
from PIL import Image
import imagehash
class TestImageHash(unittest.TestCase):
@staticmethod
def get_data_image(fname=None):
if fname is None:
fname = 'imagehash.png'
dname = os.path.abspath(os.path.dirname(__file__))
target = os.path.join(dname, 'data', fname)
if not os.path.isfile(target):
emsg = 'Unknown test image file: {!r}'
raise ValueError(emsg.format(target))
return Image.open(target)
def check_hash_algorithm(self, func, image):
original_hash = func(image)
rotate_image = image.rotate(-1)
rotate_hash = func(rotate_image)
distance = original_hash - rotate_hash
emsg = ('slightly rotated image should have '
'similar hash {} {} {}'.format(original_hash, rotate_hash,
distance))
self.assertTrue(distance <= 10, emsg)
rotate_image = image.rotate(-90)
rotate_hash = func(rotate_image)
emsg = ('rotated image should have different '
'hash {} {}'.format(original_hash, rotate_hash))
self.assertNotEqual(original_hash, rotate_hash, emsg)
distance = original_hash - rotate_hash
emsg = ('rotated image should have larger different '
'hash {} {} {}'.format(original_hash, rotate_hash,
distance))
self.assertTrue(distance > 10, emsg)
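    # Hedged sketch of the distance semantics relied on above: subtracting two
    # ImageHash objects yields their Hamming distance (number of differing
    # bits), so for example:
    #
    #   h1 = imagehash.average_hash(image)
    #   h2 = imagehash.average_hash(image.rotate(-1))
    #   assert (h1 - h2) <= 10      # near-duplicates stay close
    #   assert h1 - h1 == 0         # identical hashes have zero distance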
def check_hash_length(self, func, image, sizes=range(2,21)):
for hash_size in sizes:
image_hash = func(image, hash_size=hash_size)
emsg = 'hash_size={} is not respected'.format(hash_size)
self.assertEqual(image_hash.hash.size, hash_size**2, emsg)
def check_hash_stored(self, func, image, sizes=range(2,21)):
for hash_size in sizes:
image_hash = func(image, hash_size)
other_hash = imagehash.hex_to_hash(str(image_hash))
emsg = 'stringified hash {} != original hash {}'.format(other_hash,
image_hash)
self.assertEqual(image_hash, other_hash, emsg)
distance = image_hash - other_hash
emsg = ('unexpected hamming distance {}: original hash {} '
'- stringified hash {}'.format(distance, image_hash,
other_hash))
self.assertEqual(distance, 0, emsg)
def check_hash_size(self, func, image, sizes=range(-1,2)):
for hash_size in sizes:
with self.assertRaises(ValueError):
func(image, hash_size)
| {
"repo_name": "JohannesBuchner/imagehash",
"path": "tests/utils.py",
"copies": "1",
"size": "2788",
"license": "bsd-2-clause",
"hash": -7766981027278081000,
"line_mean": 40.6119402985,
"line_max": 79,
"alpha_frac": 0.5638450502,
"autogenerated": false,
"ratio": 4.142644873699852,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.015621526069287264,
"num_lines": 67
} |
from __future__ import absolute_import, division, print_function
import os
import pickle
from six.moves import urllib
import tflearn
from tflearn.data_utils import *
path = "alltext.txt"
char_idx_file = 'char_idx.pickle'
maxlen = 25
char_idx = None
X, Y, char_idx = textfile_to_semi_redundant_sequences(path, seq_maxlen=maxlen, redun_step=3)
pickle.dump(char_idx, open(char_idx_file,'wb'))
g = tflearn.input_data([None, maxlen, len(char_idx)])
g = tflearn.lstm(g, 512, return_seq=True)
g = tflearn.dropout(g, 0.5)
g = tflearn.lstm(g, 512, return_seq=True)
g = tflearn.dropout(g, 0.5)
g = tflearn.lstm(g, 512)
g = tflearn.dropout(g, 0.5)
g = tflearn.fully_connected(g, len(char_idx), activation='softmax')
g = tflearn.regression(g, optimizer='adam', loss='categorical_crossentropy',
learning_rate=0.001)
m = tflearn.SequenceGenerator(g, dictionary=char_idx,
seq_maxlen=maxlen,
clip_gradients=5.0,
checkpoint_path='model_writerstyle')
for i in range(150):
seed = random_sequence_from_textfile(path, maxlen)
m.fit(X, Y, validation_set=0.1, batch_size=128,
n_epoch=1, run_id='writerstyle')
print("-- TESTING...")
print("-- Test with temperature of 1.0 --")
print(m.generate(600, temperature=1.0, seq_seed=seed))
print("-- Test with temperature of 0.5 --")
print(m.generate(600, temperature=0.5, seq_seed=seed))
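# Note on the two probes above (hedged): `temperature` rescales the predicted
# character distribution roughly as p_i ∝ exp(log(p_i) / T) before sampling,
# so T=1.0 samples from the model's raw distribution while T=0.5 sharpens it
# toward the most likely characters, producing more conservative text.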
| {
"repo_name": "aicentral/writestyle",
"path": "RunThis.py",
"copies": "1",
"size": "1468",
"license": "mit",
"hash": 5609450723688931000,
"line_mean": 31.6222222222,
"line_max": 92,
"alpha_frac": 0.6457765668,
"autogenerated": false,
"ratio": 3.0647181628392484,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9206985171694206,
"avg_score": 0.0007019115890083632,
"num_lines": 45
} |
from __future__ import absolute_import, division, print_function
import os
import pickle
from six.moves import urllib
import tflearn
from tflearn.data_utils import *
path = "../../data/shakespeare/shakespeare_input.txt"
char_idx_file = '../../data/shakespeare/char_idx.pickle'
if not os.path.isfile(path):
urllib.request.urlretrieve("https://raw.githubusercontent.com/tflearn/tflearn.github.io/master/resources/shakespeare_input.txt", path)
maxlen = 25
char_idx = None
if os.path.isfile(char_idx_file):
print('Loading previous char_idx')
char_idx = pickle.load(open(char_idx_file, 'rb'))
X, Y, char_idx = \
textfile_to_semi_redundant_sequences(path, seq_maxlen=maxlen, redun_step=3,
pre_defined_char_idx=char_idx)
pickle.dump(char_idx, open(char_idx_file,'wb'))
g = tflearn.input_data([None, maxlen, len(char_idx)])
g = tflearn.lstm(g, 512, return_seq=True)
g = tflearn.dropout(g, 0.5)
g = tflearn.lstm(g, 512, return_seq=True)
g = tflearn.dropout(g, 0.5)
g = tflearn.lstm(g, 512)
g = tflearn.dropout(g, 0.5)
g = tflearn.fully_connected(g, len(char_idx), activation='softmax')
g = tflearn.regression(g, optimizer='adam', loss='categorical_crossentropy',
learning_rate=0.001)
m = tflearn.SequenceGenerator(g, dictionary=char_idx,
seq_maxlen=maxlen,
clip_gradients=5.0,
checkpoint_path='lgs-ckpt/model_shakespeare')
for i in range(50):
seed = random_sequence_from_textfile(path, maxlen)
m.fit(X, Y, validation_set=0.1, batch_size=128,
n_epoch=1, run_id='shakespeare')
print("-- TESTING...")
print("-- Test with temperature of 1.0 --")
print(m.generate(600, temperature=1.0, seq_seed=seed))
print("-- Test with temperature of 0.5 --")
print(m.generate(600, temperature=0.5, seq_seed=seed))
| {
"repo_name": "hashware/tflearn-learn",
"path": "examples/nlp/lstm_generator_shakespeare.py",
"copies": "1",
"size": "1906",
"license": "mit",
"hash": -8632769314464578000,
"line_mean": 34.9622641509,
"line_max": 138,
"alpha_frac": 0.648478489,
"autogenerated": false,
"ratio": 3.069243156199678,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4217721645199678,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import os
import pickle
from six.moves import urllib
import tflearn
from tflearn.data_utils import *
path = "great gatsby.txt"
char_idx_file = 'char_idx.pickle'
maxlen = 25
char_idx = None
if os.path.isfile(char_idx_file):
print('Loading previous char_idx')
char_idx = pickle.load(open(char_idx_file, 'rb'))
X, Y, char_idx = \
textfile_to_semi_redundant_sequences(path, seq_maxlen=maxlen, redun_step=3,
pre_defined_char_idx=char_idx)
pickle.dump(char_idx, open(char_idx_file,'wb'))
g = tflearn.input_data([None, maxlen, len(char_idx)])
g = tflearn.lstm(g, 512, return_seq=True)
g = tflearn.dropout(g, 0.5)
g = tflearn.lstm(g, 512)
g = tflearn.dropout(g, 0.5)
g = tflearn.fully_connected(g, len(char_idx), activation='softmax')
g = tflearn.regression(g, optimizer='adam', loss='categorical_crossentropy',
learning_rate=0.001)
m = tflearn.SequenceGenerator(g, dictionary=char_idx,
seq_maxlen=maxlen,
clip_gradients=5.0,
checkpoint_path='model_shakespeare')
for i in range(2000):
print('Iteration:', i)
seed = random_sequence_from_textfile(path, maxlen)
m.fit(X, Y, validation_set=0.1, batch_size=128,
n_epoch=1, run_id='gatsby')
print("-- TESTING...")
print("-- Test with temperature of 1.0 --")
print(m.generate(600, temperature=1.0, seq_seed=seed))
print("-- Test with temperature of 0.5 --")
print(m.generate(600, temperature=0.5, seq_seed=seed))
| {
"repo_name": "arcyfelix/ML-DL-AI",
"path": "Supervised Learning/NLP/Deep Gatsby/gatsby_gen.py",
"copies": "1",
"size": "1624",
"license": "apache-2.0",
"hash": 7684173786753657000,
"line_mean": 32.1428571429,
"line_max": 79,
"alpha_frac": 0.631773399,
"autogenerated": false,
"ratio": 3.1290944123314066,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4260867811331407,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import os
import pickle
import shutil
from collections import OrderedDict
from collections.abc import Mapping
from typing import Any, Text
import numpy as np
from six import string_types
from six.moves import cPickle, range, zip
from bigarray import MmapArray, MmapArrayWriter, read_mmaparray_header
from odin.fuel.databases import MmapDict, SQLiteDict
from odin.utils import (Progbar, UnitTimer, as_tuple, ctext, eprint,
flatten_list, get_file, is_callable, is_string, wprint)
__all__ = [
'Dataset',
]
# ===========================================================================
# Helper
# ===========================================================================
def _infer_separator(path):
all_sep = ('\t', ' ', ';', ',')
with open(path, 'r') as f:
line = f.readline()
line = line.strip()
orig_length = len(line)
for s in all_sep:
if s not in line:
continue
if 1 < len(line.split(s)) < orig_length:
return s
raise RuntimeError("CSV file with the first line: `%s`, "
"cannot match separator in known list: `%s`"
% (line, str(all_sep)))
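# Hedged example of the inference above: for a first line such as
# "name\tdtype\tshape", splitting on '\t' gives 3 fields and 1 < 3 < len(line),
# so '\t' is returned; a candidate separator that never occurs in the line is
# skipped.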
_audio_ext = ('.3gp', '.aa', '.aac', '.aax', '.act', '.aiff',
'.amr', '.ape', '.au', '.awb', '.dct', '.dss',
'.dvf', '.flac', '.gsm', '.ivs', '.m4a', '.m4b',
'.m4p', '.mmf', '.mp3', '.mpc', '.msv', '.nsf',
'.ogg,', '.opus', '.raw', '.sln', '.tta', '.vox',
'.wav', '.wma', '.wv', '.webm', '.sph', '.pcm')
_image_ext = ('.tif', '.tiff', '.gif', '.jpeg', '.jpg', '.jif',
'.jfif', '.jp2', '.jpx', '.j2k', '.j2c', '.fpx',
'.pcd', '.png', '.pdf')
_ignore_files = ('.DS_Store',)
def _parse_data_descriptor(path, read_only):
""" Return mapping: name -> (dtype, shape, Data, path) """
if not os.path.isfile(path):
return None
file_ext = os.path.splitext(path)[-1].lower()
file_name = os.path.basename(path)
# ====== ignore ====== #
if os.path.basename(path) in _ignore_files:
return None
# ====== audio file ====== #
if file_ext in _audio_ext:
return [(file_name, ('audio', 'unknown', None, path))]
# ====== image file ====== #
if file_ext in _image_ext:
return [(file_name, ('image', 'unknown', None, path))]
# ====== text file .txt ====== #
if file_ext in ('.txt',):
return [(file_name, ('txt', 'unknown', None, path))]
# ====== check if is csv file ====== #
if file_ext in ('.csv', '.tsv'):
sep = _infer_separator(path)
data = []
# read by manually open file much faster than numpy.genfromtxt
with open(path, 'r') as f:
for line in f:
line = line[:-1]
data.append(line.split(sep))
data = np.array(data, dtype=str)
return [('.'.join(file_name.split('.')[:-1]),
('csv', data.shape, data, path))]
# ====== check if a file is Data ====== #
try:
dtype, shape = read_mmaparray_header(path)
data = MmapArray(path)
assert np.dtype(dtype) == data.dtype and shape == data.shape, \
"Metadata mismatch for MmapArray"
return [(file_name, (data.dtype, data.shape, data, path))]
except Exception: # cannot read the header of MmapArray
pass
# ====== try to load pickle file if possible ====== #
try: # try with unpickling
with open(path, 'rb') as f:
data = cPickle.load(f)
shape_info = 0
if hasattr(data, 'shape'):
shape_info = data.shape
elif hasattr(data, '__len__'):
shape_info = len(data)
return [(file_name, (str(data.dtype) if hasattr(data, 'dtype') else
type(data).__name__,
shape_info, data, path))]
except cPickle.UnpicklingError:
try: # try again with numpy load
with open(path, 'rb') as f:
data = np.load(f)
return [(file_name,
(str(data.dtype) if hasattr(data, 'dtype') else type(data).__name__,
len(data) if hasattr(data, '__len__') else 0, data, path))]
except Exception:
pass
# ====== load memmap dict ====== #
try:
data = MmapDict(path, read_only=read_only)
return [(file_name, ('memdict', len(data), data, path))]
except Exception as e:
pass
# ====== load SQLiteDict ====== #
if '.db' in os.path.splitext(path)[1]:
try:
db = SQLiteDict(path, read_only=read_only)
name = os.path.basename(path).replace('.db', '')
return [(tab if tab != SQLiteDict._DEFAULT_TABLE else name,
('sqlite', len(db.set_table(tab)), db.as_table(tab), path))
for tab in db.get_all_tables()]
except Exception as e:
pass
# ====== unknown datatype ====== #
return [(file_name, ('unknown', 'unknown', None, path))]
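# Hedged examples of the descriptors returned above (name -> (dtype, shape,
# data, path)); names and shapes are illustrative only:
#
#   'speech.wav' -> ('audio', 'unknown', None, path)
#   'feat.csv'   -> key 'feat', value ('csv', (n_rows, n_cols), ndarray, path)
#   'X'          -> (X.dtype, X.shape, MmapArray, path)
#   'meta.db'    -> ('sqlite', n_rows, SQLiteDict table, path)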
# ===========================================================================
# Datasets
# ===========================================================================
class Dataset(object):
""" This Dataset can automatically parse memmap (created by MmapData),
MmapDict, pickled dictionary and hdf5 files and keep tracking the changes.
Any file name with "readme" prefix will be parsed as text and showed as
readme.
Support data type:
- .txt or .csv files:
-
Note
----
for developer: _data_map contains: name -> (dtype, shape, Data or pathtoData)
readme included with the dataset should contain license information
All the file with `.db` extension will be treat as SQLite data
"""
__INSTANCES = {}
def __new__(cls, *args, **kwargs):
path = kwargs.get('path', None)
if path is None:
path = args[0]
if not is_string(path):
raise ValueError("`path` for Dataset must be string, but given "
"object with type: %s" % type(path))
path = os.path.abspath(path)
# Found old instance
if path in Dataset.__INSTANCES:
return Dataset.__INSTANCES[path]
# new Dataset
new_instance = super(Dataset, cls).__new__(cls)
Dataset.__INSTANCES[path] = new_instance
return new_instance
def __init__(self, path, read_only=False, override=False):
path = os.path.abspath(path)
self.read_only = read_only
self._readme_info = [ctext('README:', 'yellow'),
'------',
' No information!']
self._readme_path = None
# flag to check cPickle called with protocol 2
self._new_args_called = False
# parse all data from path
if path is not None:
if override and os.path.exists(path) and os.path.isdir(path):
shutil.rmtree(path)
        print('Overrode old dataset at path:', path)
if os.path.isfile(path) and '.zip' in os.path.basename(path):
self._load_archive(path,
extract_path=path.replace(os.path.basename(path), ''),
read_only=read_only)
else:
self._set_path(path, self.read_only)
else:
raise ValueError('Invalid path for Dataset: %s' % path)
def _set_path(self, path, read_only):
MAXIMUM_README_LINE = 25
# all files are opened with default_mode=r+
self._data_map = OrderedDict()
self._path = os.path.abspath(path)
self._default_hdf5 = os.path.basename(self._path) + '_default.h5'
    # saved feeder info
self._saved_indices = {}
self._saved_recipes = {}
# just make new dir
if not os.path.exists(path):
os.mkdir(path)
os.mkdir(self.recipe_path)
os.mkdir(self.index_path)
      return  # nothing more to do
elif not os.path.isdir(path):
raise ValueError('Dataset path must be a folder.')
# ====== Load all Data ====== #
files = os.listdir(path)
for fname in files:
# found README
if 'readme' == fname[:6].lower():
readme_path = os.path.join(path, fname)
with open(readme_path, 'r') as readme_file:
readme = readme_file.readlines()[:MAXIMUM_README_LINE]
readme = [' ' + i[:-1] for i in readme if len(i) > 0 and i != '\n']
readme.append(' => For more information: ' + readme_path)
self._readme_info = [ctext('README:', 'yellow'),
'------'] + readme
self._readme_path = readme_path
# parse data
data = _parse_data_descriptor(os.path.join(path, fname),
read_only)
if data is None: continue
for key, d in data:
if key in self._data_map:
raise ValueError('Found duplicated data with follow info: '
'{}'.format(key))
else:
self._data_map[key] = d
# ==================== Pickle ==================== #
def __getstate__(self):
if not self._new_args_called:
raise RuntimeError(
"You must use argument `protocol=cPickle.HIGHEST_PROTOCOL` "
"when using `pickle` or `cPickle` to be able pickling Dataset.")
self._new_args_called = False
return self.path, self.read_only
def __setstate__(self, states):
path, read_only = states
self._new_args_called = False
self._set_path(path, read_only)
def __getnewargs__(self):
self._new_args_called = True
return (self.path,)
# ==================== archive loading ==================== #
def _load_archive(self, path, extract_path, read_only):
from zipfile import ZipFile, ZIP_DEFLATED
try:
zfile = ZipFile(path, mode='r', compression=ZIP_DEFLATED)
allfile = zfile.namelist()
# validate extract_path
if not os.path.isdir(extract_path):
        raise ValueError('Extract path must be a folder, but path'
'={} is a file'.format(extract_path))
extract_path = os.path.join(extract_path,
os.path.basename(path).replace('.zip', ''))
# found the extracted dir, use it
if os.path.isdir(extract_path) and \
set(os.listdir(extract_path)) == set(allfile):
self._set_path(extract_path, read_only=read_only)
return
# decompress everything
if not os.path.exists(extract_path):
os.mkdir(extract_path)
maxlen = max([len(i) for i in allfile])
pb = Progbar(target=len(allfile), name="[Dataset] Loading Archive",
print_summary=True, print_report=True)
for i, f in enumerate(allfile):
zfile.extract(f, path=extract_path)
pb['File'] = ('Unarchiving: %-' + str(maxlen) + 's') % f
pb.add(1)
# ====== finally set path ====== #
self._set_path(extract_path, read_only=read_only)
except IOError as e:
raise IOError('Error loading archived dataset, path:{}, error:{}'
'.'.format(path, e))
return None
# ==================== properties ==================== #
@property
def basename(self):
return os.path.basename(self.path)
@property
def path(self):
return self._path
@property
def recipe_path(self):
return os.path.join(self.path, 'recipe')
@property
def index_path(self):
return os.path.join(self.path, 'index')
@property
def archive_path(self):
"""Return default archive path, which is:
../[dataset_name].zip
"""
name = os.path.basename(self._path)
return os.path.join(self._path, '..', name + '.zip')
@property
def md5_checksum(self):
return self.get_md5_checksum()
@property
def size(self):
""" return size in MegaByte"""
size_bytes = 0
for name, (dtype, shape, data, path) in self._data_map.items():
try:
size_bytes += os.path.getsize(path) # in bytes
except Exception as e:
eprint("Cannot acquire file size information, file: %s; error: %s"
% (str(name), str(e)))
return size_bytes / 1024. / 1024.
def __len__(self):
""" Return total number of data """
return len(self._data_map)
def __iter__(self):
return self.items()
def items(self):
for name in self._data_map.keys():
yield name, self.__getitem__(name)
def iterinfo(self):
"""Return iteration of: (dtype, shape, loaded_data, path)"""
for name, (dtype, shape, data, path) in self._data_map.items():
yield (dtype, shape, self.__getitem__(name), path)
def keys(self):
"""
Return
------
name of all Data
"""
return self._data_map.keys()
def values(self):
"""
Return
------
(dtype, shape, data, path) of Data
"""
for k in self._data_map.keys():
yield self.__getitem__(k)
def archive(self):
from zipfile import ZipFile, ZIP_DEFLATED
path = self.archive_path
zfile = ZipFile(path, mode='w', compression=ZIP_DEFLATED)
files = set([_[-1] for _ in self._data_map.values()])
prog = Progbar(target=len(files), name="[Dataset] Archiving",
print_report=True, print_summary=True)
maxlen = max([len(os.path.basename(i)) for i in files])
for i, f in enumerate(files):
zfile.write(f, os.path.basename(f))
prog['Data'] = ('Archiving: %-' + str(maxlen) + 's') \
% os.path.basename(f)
prog.add(1)
zfile.close()
return path
# ==================== Data management ==================== #
def copy(self, destination,
indices_filter=None, data_filter=None,
override=False):
""" Copy the dataset to a new folder and closed
the old dataset
"""
from distutils.dir_util import copy_tree
read_only = self.read_only
raise NotImplementedError
def flush(self):
for dtype, shape, data, path in self._data_map.values():
if hasattr(data, 'flush'):
data.flush()
elif data is not None: # Flush pickling data
with open(path, 'wb') as f:
cPickle.dump(data, f, protocol=cPickle.HIGHEST_PROTOCOL)
def close(self, name=None):
# ====== close all Data ====== #
if name is None: # close all files
for name, (dtype, shape, data, path) in list(self._data_map.items()):
if hasattr(data, 'close'):
data.close()
del data
del self._data_map[name]
# close all external indices and recipes
for name, ids in self._saved_indices.items():
ids.close()
self._saved_indices.clear()
for name, rcp in list(self._saved_recipes.items()):
del rcp
self._saved_recipes.clear()
# Check if exist global instance
if self.path in Dataset.__INSTANCES:
del Dataset.__INSTANCES[self.path]
# ====== close a particular file ====== #
elif name in self._data_map:
(dtype, shape, data, path) = self._data_map[name]
if dtype == 'sqlite':
data.sqlite.close()
elif hasattr(data, 'close'):
data.close()
del data
del self._data_map[name]
# ==================== Some info ==================== #
def __contains__(self, key):
return key in self._data_map
def find_prefix(self, feat_name, prefix):
""" Specialized method for searching for Data or NoSQL
with prefix, for example `prefix='indices'`:
- `indices_%s_%s` % (feat1_name, feat2, ...)
if no indices found, return the default indices with
name 'indices'
"""
indices = self[prefix] if prefix in self else None
for key in self.keys():
if prefix == key[:len(prefix)] and '_' + feat_name in key:
indices = self[key]
if indices is None:
raise RuntimeError("Cannot find prefix: '%s' for feature with name: '%s', "
"all available name with given prefix are: %s" %
(prefix, feat_name, ','.join([k for k in self.keys()
if prefix == k[:len(k)]])
))
return indices
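  # Hedged example: with feat_name='mfcc' and prefix='indices', a key such as
  # 'indices_mfcc' (prefix match plus '_mfcc' in the key) is preferred;
  # otherwise the plain 'indices' entry is returned when it exists.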
def __getitem__(self, key):
if is_string(key):
if key not in self._data_map:
raise KeyError('%s not found in this dataset' % key)
dtype, shape, data, path = self._data_map[key]
return path if data is None else data
raise ValueError('Only accept key type is string.')
def get(self, key, default=None):
if key in self._data_map:
return self.__getitem__(key)
return default
def __setitem__(self, name: Text, value: Any):
"""
Parameters
----------
    name : str
      name for the stored value; the value is pickled into the dataset
      folder under this name, for example: ds['X'] = numpy.ones((8, 12))
    value : Any
      object to store; its dtype and shape are recorded when available
"""
assert isinstance(name, string_types), \
"name must be given as string types."
path = os.path.join(self.path, name)
with open(path, 'wb') as f:
pickle.dump(value, f)
self._data_map[name] = (
value.dtype if hasattr(value, 'dtype') else str(type(value)),
value.shape if hasattr(value, 'shape') else 'unknown',
value, path)
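  # Hedged example of the setter above: the value is pickled into the dataset
  # folder and registered in `_data_map`, e.g.
  #
  #   ds['labels'] = np.arange(10)   # -> <dataset_path>/labels (pickled)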
def get_md5_checksum(self, excluded_name=[]):
from odin.utils.crypto import md5_checksum
md5_text = ''
all_data_items = {i: j
for i, j in self._data_map.items()
if i not in excluded_name}
for name, (dtype, shape, data, path) in sorted(all_data_items.items(),
key=lambda x: x[0]):
md5_text += md5_checksum(path)
return md5_text
def __str__(self):
padding = ' '
# NOTE: each element in the list is one line
s = ['========== ' +
ctext('Dataset:%s Total:%d Size:%.2f(MB)', 'magenta') %
(self.path, len(self._data_map), self.size) +
' ==========']
s += self._readme_info
s += [ctext('DATA:', 'yellow'),
'----']
# ====== Find longest string ====== #
longest_name = 0
longest_shape = 0
longest_dtype = 0
longest_file = 0
print_info = []
for name, (dtype, shape, data, path) in sorted(self._data_map.items()):
shape = data.shape if hasattr(data, 'shape') else shape
longest_name = max(len(name), longest_name)
longest_dtype = max(len(str(dtype)), longest_dtype)
longest_shape = max(len(str(shape)), longest_shape)
longest_file = max(len(str(path)), longest_file)
print_info.append([name, dtype, shape, path])
# ====== return print string ====== #
format_str = (padding + '%-' + str(longest_name + 2) + 's '
'%-' + str(longest_dtype) + 's' + ctext(':', 'yellow') +
'%-' + str(longest_shape) + 's '
'path:%-' + str(longest_file) + 's')
for name, dtype, shape, path in print_info:
s.append(format_str % ('"%s"' % name, dtype, shape, path))
# ====== add recipes info ====== #
for name, recipe in self._saved_recipes.items():
s.append(ctext('(Recipe) ', 'yellow') + '"%s"' % name)
for rcp in recipe:
rcp = str(rcp)
s.append('\n'.join([padding + line
for line in rcp.split('\n')]))
# ====== add indices info ====== #
for name, index in self._saved_indices.items():
s.append(ctext('(Index) ', 'yellow') + '"%s"' % name)
s.append(padding + str(index))
name, (start, end) = next(index.items())
s.append(padding + 'Sample: "%s %d-%d"' % (name, start, end))
return '\n'.join(s)
@property
def readme(self):
""" return text string of README of this dataset """
if self._readme_path is not None:
with open(self._readme_path, 'r') as f:
readme = f.read()
else:
readme = self._readme_info[-1]
return readme
| {
"repo_name": "imito/odin",
"path": "odin/fuel/dataset.py",
"copies": "1",
"size": "19397",
"license": "mit",
"hash": -8876937914407351000,
"line_mean": 34.3959854015,
"line_max": 81,
"alpha_frac": 0.5524565654,
"autogenerated": false,
"ratio": 3.634438823308975,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46868953887089754,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import os
import pickle
import shutil
import warnings
from collections import OrderedDict, defaultdict
from enum import Enum
from numbers import Number
import numba as nb
import numpy as np
from scipy.io import wavfile
from odin import fuel as F
from odin import visual as V
from odin.stats import freqcount, sampling_iter
from odin.utils import (Progbar, args_parse, cache_disk, catch_warnings_error,
catch_warnings_ignore, crypto, ctext, get_exppath,
get_logpath, get_module_from_path, get_script_name,
get_script_path, mpi, select_path)
# ===========================================================================
# Configuration
# ===========================================================================
class Config(object):
# ====== Acoustic features ====== #
FRAME_LENGTH = 0.025
STEP_LENGTH = 0.01
SAMPLE_RATE = 8000
WINDOW = 'hamm'
NFFT = 512
# Random seed for reproducibility
SUPER_SEED = 87654321
class SystemStates(Enum):
""" SystemStates """
UNKNOWN = 0
EXTRACT_FEATURES = 1
TRAINING = 2
SCORING = 3
# ===========================================================================
# General arguments for all experiments
# ===========================================================================
_args = args_parse(descriptions=[
('recipe', 'recipe is the name of acoustic Dataset defined in feature_recipes.py', None),
('-feat', 'specific name for the acoustic features, extracted from the given recipe', None, ''),
('-aug', 'augmentation dataset: musan, rirs; could be multiple dataset '
'for training: "musan,rirs"', None, 'None'),
('-ncpu', 'number of CPU to be used, if <= 0, auto-select', None, 0),
# for scoring
('-sys', 'name of the system for scoring: xvec, ivec, e2e ...', None, 'xvec'),
('-sysid', 'when a system is saved multiple checkpoint (e.g. sys.0.ai)', None, '-1'),
('-score', 'name of dataset for scoring, multiple dataset split by ","', None, 'sre18dev,sre18eval'),
('-backend', 'list of dataset for training the backend: '
'PLDA, SVM or Cosine', None, 'sre04,sre05,sre06,sre08,sre10,mx6'),
('-lda', 'if > 0, running LDA before training the backend '
'with given number of components', None, 0),
('-plda', 'number of PLDA components, must be > 0 ', None, 150),
('--mll', 'pre-fitting maximum likelihood before training PLDA', None, False),
('--showllk', 'show LLK during training of PLDA, this will slow thing down', None, False),
# for training
('-downsample', 'absolute number of files used for training', None, 0),
('-exclude', 'list of excluded dataset not for training,'
'multiple dataset split by ","', None, ''),
# for ivector
('-nmix', 'for i-vector training, number of Gaussian components', None, 2048),
('-tdim', 'for i-vector training, number of latent dimension for i-vector', None, 600),
# for DNN
('-utt', 'maximum length of sequence for training', None, 3),
('-seq', 'sequencing mode for training data, cut or pad', None, 'cut'),
('-batch', 'batch size, for training DNN, kaldi use 64, we use 128', None, 128),
('-epoch', 'number of epoch, for training DNN, kaldi only 3 epochs', None, 12),
('-clip', 'The maximum change in parameters allowed per minibatch, '
'measured in Euclidean norm over the entire model (change '
'will be clipped to this value), kaldi use 2.0', None, 2.0),
('-lr', 'learning rate for Adam, kaldi use 0.001 by default,'
' we use 0.01', None, 0.01),
# others
('-mindur', 'for filtering utterances, minimum duration of utterance '
'for training (in second)', None, 1),
('-minutt', 'for filtering utterances, minimum number of utterance of '
'each speaker for training', None, 3),
('--override', 'override previous experiments', None, False),
('--debug', 'enable debugging', None, False),
])
IS_DEBUGGING = bool(_args.debug)
IS_OVERRIDE = bool(_args.override)
MINIMUM_UTT_DURATION = int(_args.mindur) # in seconds
assert MINIMUM_UTT_DURATION > 0, "Minimum utterance duration must be greater than 0"
MINIMUM_UTT_PER_SPEAKERS = int(_args.minutt) # number of utterances
# this variable determine which state is running
CURRENT_STATE = SystemStates.UNKNOWN
# ====== Features extraction ====== #
FEATURE_RECIPE = str(_args.recipe)
FEATURE_NAME = FEATURE_RECIPE.split('_')[0] if len(str(_args.feat)) == 0 else str(_args.feat)
AUGMENTATION_NAME = _args.aug
TRAINING_DATASET = ['mx6', 'voxceleb1', 'voxceleb2', 'swb', 'fisher',
'sre04', 'sre05', 'sre06', 'sre08', 'sre10']
# ====== DNN ====== #
BATCH_SIZE = int(_args.batch)
EPOCH = int(_args.epoch)
LEARNING_RATE = float(_args.lr)
GRADIENT_CLIPPING = float(_args.clip)
# ====== searching for the appropriate system ====== #
SCORE_SYSTEM_NAME = _args.sys
SCORE_SYSTEM_ID = int(_args.sysid)
N_LDA = int(_args.lda)
N_PLDA = int(_args.plda)
assert N_PLDA > 0, "Number of PLDA components must > 0, but given: %d" % N_PLDA
PLDA_MAXIMUM_LIKELIHOOD = bool(_args.mll)
PLDA_SHOW_LLK = bool(_args.showllk)
# ====== system ====== #
NCPU = min(18, mpi.cpu_count() - 2) if _args.ncpu <= 0 else int(_args.ncpu)
# ====== helper for checking the requirement ====== #
def _check_feature_extraction_requirement():
# check requirement for feature extraction
from shutil import which
if which('sox') is None:
raise RuntimeError("`sox` was not installed")
if which('sph2pipe') is None:
raise RuntimeError("`sph2pipe` was not installed")
if which('ffmpeg') is None:
raise RuntimeError("`ffmpeg` was not installed")
def _check_recipe_name_for_extraction():
# check the requirement of recipe name for feature extraction
if '_' in FEATURE_RECIPE:
raise ValueError("'_' can appear in recipe name which is: '%s'" % FEATURE_RECIPE)
# ====== check the running script to determine the current running states ====== #
_script_name = get_script_name()
if _script_name in ('speech_augmentation', 'speech_features_extraction'):
CURRENT_STATE = SystemStates.EXTRACT_FEATURES
_check_feature_extraction_requirement()
_check_recipe_name_for_extraction()
elif _script_name in ('train_xvec', 'train_ivec', 'train_tvec',
'train_evec', 'analyze', 'analyze_data'):
CURRENT_STATE = SystemStates.TRAINING
elif _script_name in ('make_score',):
CURRENT_STATE = SystemStates.SCORING
_check_feature_extraction_requirement()
else:
raise RuntimeError("Unknown states for current running script: %s/%s" %
(get_script_path(), get_script_name()))
# some fancy log of current state
print(ctext('====================================', 'red'))
print(ctext("System state:", 'cyan'), ctext(CURRENT_STATE, 'yellow'))
print(ctext('====================================', 'red'))
# ===========================================================================
# FILE LIST PATH
# ===========================================================================
# ====== basic directories ====== #
EXP_DIR = get_exppath('sre', override=False)
# this folder store extracted vectors for training backend and extracting scores
VECTORS_DIR = os.path.join(EXP_DIR, 'vectors')
if not os.path.exists(VECTORS_DIR):
os.mkdir(VECTORS_DIR)
# this folder store the results
RESULT_DIR = os.path.join(EXP_DIR, 'results')
if not os.path.exists(RESULT_DIR):
os.mkdir(RESULT_DIR)
# this folder store the analysis
ANALYSIS_DIR = os.path.join(EXP_DIR, 'analysis')
if not os.path.exists(ANALYSIS_DIR):
os.mkdir(ANALYSIS_DIR)
# ====== raw data ====== #
PATH_BASE = select_path(
'/media/data2/SRE_DATA',
'/mnt/sda1/SRE_DATA',
'/mnt/sdb1/SRE_DATA',
default='')
# path to directory contain following folders:
##############
# * fisher
# * mx6
# * sre04
# * sre05
# * sre06
# * sre08
# * sre10
# * swb
# * voxceleb1
# * voxceleb2
###############
# * musan
# * rirs
###############
# * sre18dev
# * sre18eval
PATH_RAW_DATA = {
'mx6': PATH_BASE,
'voxceleb1': PATH_BASE,
'voxceleb2': PATH_BASE,
'swb': PATH_BASE,
'fisher': PATH_BASE,
'sre04': os.path.join(PATH_BASE, 'NIST1996_2008/SRE02_SRE06'),
'sre05': os.path.join(PATH_BASE, 'NIST1996_2008/SRE96_SRE05'),
'sre06': os.path.join(PATH_BASE, 'NIST1996_2008/SRE02_SRE06'),
'sre08': PATH_BASE,
'sre10': PATH_BASE,
'sre18dev': PATH_BASE,
'sre18eval': PATH_BASE,
# noise datasets
'musan': PATH_BASE,
'rirs': PATH_BASE,
}
# all features will be stored here
OUTPUT_DIR = select_path(
'/home/trung/data',
'/media/data1',
'/mnt/sda1'
)
PATH_ACOUSTIC_FEATURES = os.path.join(OUTPUT_DIR, "SRE_FEAT")
if not os.path.exists(PATH_ACOUSTIC_FEATURES):
os.mkdir(PATH_ACOUSTIC_FEATURES)
# ===========================================================================
# Load the file list
# ===========================================================================
sre_file_list = F.load_sre_list()
print('README at:', ctext(sre_file_list['README.txt'], 'cyan'))
sre_file_list = {k: v
for k, v in sre_file_list.items()
if isinstance(v, np.ndarray)}
print("Original dataset:")
for k, v in sorted(sre_file_list.items(), key=lambda x: x[0]):
print(' ', ctext('%-18s' % k, 'yellow'), ':',
ctext(v.shape, 'cyan'))
# ===========================================================================
# Validate scoring dataset
# ===========================================================================
def validate_scoring_dataset(in_path_raw, score_dataset, file_must_exist=True):
all_files = {}
for dsname in score_dataset:
if dsname not in sre_file_list:
raise ValueError("Cannot find dataset with name: '%s' in the file list" % dsname)
if dsname not in in_path_raw:
raise ValueError("Cannot find dataset with name: '%s' in provided path" % dsname)
base_path = in_path_raw[dsname]
ds = []
for row in sre_file_list[dsname]:
path = os.path.join(base_path, row[0])
# every file must exist
if bool(file_must_exist) and not os.path.exists(path):
raise RuntimeError("File not exist at path: %s" % path)
ds.append([path] + row[1:4].tolist() + [dsname])
all_files[dsname] = np.array(ds)
# Header:
# 0 1 2 3 4
# path, channel, name, something, dataset_name
return all_files
# ====== check dataset for scoring ====== #
if CURRENT_STATE == SystemStates.SCORING:
assert len(_args.score) > 0, \
"No dataset are provided for scoring, specify '-score' option"
# for scoring
SCORING_DATASETS = validate_scoring_dataset(
in_path_raw=PATH_RAW_DATA,
score_dataset=str(_args.score).strip().split(','))
print("Processed scoring dataset:")
for dsname, dsarray in sorted(SCORING_DATASETS.items(),
key=lambda x: x[0]):
print(' ', ctext('%-10s' % dsname, 'yellow'), ':',
'%s' % ctext(dsarray.shape, 'cyan'))
# for training the backend
BACKEND_DATASETS = validate_scoring_dataset(
in_path_raw=PATH_RAW_DATA,
score_dataset=str(_args.backend).strip().split(','),
file_must_exist=False)
assert len(BACKEND_DATASETS) > 0, \
"Datasets for training the backend must be provided"
print("Processed backend dataset:")
for dsname, dsarray in sorted(BACKEND_DATASETS.items(),
key=lambda x: x[0]):
print(' ', ctext('%-10s' % dsname, 'yellow'), ':',
'%s' % ctext(dsarray.shape, 'cyan'))
# ===========================================================================
# Validating the Noise dataset for augmentation
# ===========================================================================
@cache_disk
def validating_noise_data(in_path_raw):
# preparing
noise_dataset = ['musan', 'rirs']
all_files = defaultdict(list)
n_files = sum(len(sre_file_list[i])
for i in noise_dataset
if i in sre_file_list)
n_non_exist = 0
n_exist = 0
prog = Progbar(target=n_files, print_summary=True,
name="Validating noise dataset")
prog.set_summarizer(key='#Non-exist', fn=lambda x: x[-1])
prog.set_summarizer(key='#Exist', fn=lambda x: x[-1])
# check all dataset
for ds_name in noise_dataset:
if ds_name not in sre_file_list:
continue
if ds_name not in in_path_raw:
continue
base_path = in_path_raw[ds_name]
base_ds = all_files[ds_name]
# start validating
for row in sre_file_list[ds_name]:
# check file
path, channel, name, noise_type, duration = row[:5]
path = os.path.join(base_path, path)
if os.path.exists(path):
base_ds.append([path, channel, name, noise_type, duration])
n_exist += 1
else:
n_non_exist += 1
# update progress
prog['ds'] = ds_name
prog['#Exist'] = n_exist
prog['#Non-exist'] = n_non_exist
prog.add(1)
# ====== return ====== #
# Header:
# 0 1 2 3 4
# path, channel, name, noise_type, duration
return {key: np.array(sorted(val, key=lambda x: x[0]))
for key, val in all_files.items()}
# ==================== run the validation ==================== #
if CURRENT_STATE == SystemStates.EXTRACT_FEATURES:
ALL_NOISE = validating_noise_data(
in_path_raw=PATH_RAW_DATA)
print("Processed noise data:")
for ds_name, noise_list in ALL_NOISE.items():
print(" ", ctext(ds_name, 'yellow'), ':', noise_list.shape)
if len(noise_list) == 0:
continue
for name, count in sorted(freqcount(noise_list[:, 3]).items(),
key=lambda x: x[0]):
print(' ', ctext('%-10s' % name, 'yellow'), ':',
'%s(files)' % ctext('%-6d' % count, 'cyan'))
# ===========================================================================
# Validating the file list of training data
# ===========================================================================
@cache_disk
def validating_training_data(in_path_raw, training_dataset):
file_list = {ds: sre_file_list[ds]
for ds in training_dataset
if ds in sre_file_list}
# ====== meta info ====== #
all_files = []
non_exist_files = []
extension_count = defaultdict(int)
total_data = sum(v.shape[0]
for k, v in file_list.items()
                   if k not in ('musan', 'rirs'))
# ====== progress ====== #
prog = Progbar(target=total_data,
print_summary=True, print_report=True,
name="Preprocessing File List")
prog.set_summarizer('#Files', fn=lambda x: x[-1])
prog.set_summarizer('#Non-exist', fn=lambda x: x[-1])
# ====== iterating ====== #
for ds_name, data in sorted(file_list.items(),
key=lambda x: x[0]):
if ds_name in ('musan', 'rirs'):
continue
for row in data:
path, channel, name, spkid = row[:4]
assert channel in ('0', '1')
# check path provided
if ds_name in in_path_raw:
path = os.path.join(in_path_raw[ds_name], path)
# create new row
start_time = '-'
end_time = '-'
if ds_name == 'mx6':
start_time, end_time = row[-2:]
new_row = [path, channel, name,
ds_name + '_' + spkid, ds_name,
start_time, end_time]
# check file exist
if os.path.exists(path):
all_files.append(new_row)
else:
non_exist_files.append(new_row)
# extension
ext = os.path.splitext(path)[-1]
extension_count[ext + '-' + ds_name] += 1
# update progress
prog['Dataset'] = ds_name
prog['#Files'] = len(all_files)
prog['#Non-exist'] = len(non_exist_files)
prog.add(1)
# final results
all_files = np.array(all_files)
if len(all_files) == 0:
return all_files, np.array(non_exist_files), extension_count
# ====== check no duplicated name ====== #
n_files = len(all_files)
n_unique_files = len(np.unique(all_files[:, 2]))
assert n_files == n_unique_files, \
'Found duplicated name: %d != %d' % (n_files, n_unique_files)
# ====== check no duplicated speaker ====== #
n_spk = sum(len(np.unique(dat[:, 3]))
for name, dat in file_list.items()
if name not in ('musan', 'rirs'))
n_unique_spk = len(np.unique(all_files[:, 3]))
assert n_spk == n_unique_spk, \
'Found duplicated speakers: %d != %d' % (n_spk, n_unique_spk)
# ====== return ====== #
# Header:
# 0 1 2 3 4 5 6
# path, channel, name, spkid, dataset, start_time, end_time
return all_files, np.array(non_exist_files), extension_count
# ==================== run the validation process ==================== #
if CURRENT_STATE == SystemStates.EXTRACT_FEATURES:
(ALL_FILES, NON_EXIST_FILES, ext_count) = validating_training_data(
in_path_raw=PATH_RAW_DATA,
training_dataset=TRAINING_DATASET
)
if len(ALL_FILES) == 0:
raise RuntimeError("No files found for feature extraction")
# list of all dataset
ALL_DATASET = sorted(np.unique(ALL_FILES[:, 4]))
print("All extensions:")
for name, val in sorted(ext_count.items(), key=lambda x: x[0]):
print(' ', '%-16s' % name, ':', ctext('%-6d' % val, 'cyan'), '(files)')
print("#Speakers:", ctext(len(np.unique(ALL_FILES[:, 3])), 'cyan'))
# map Dataset_name -> speaker_ID
DS_SPK = defaultdict(list)
for row in ALL_FILES:
DS_SPK[row[4]].append(row[3])
DS_SPK = {k: sorted(set(v))
for k, v in DS_SPK.items()}
print("Processed datasets:")
for name, count in sorted(freqcount(ALL_FILES[:, 4]).items(),
key=lambda x: x[0]):
print(' ', ctext('%-10s' % name, 'yellow'), ':',
'%s(files)' % ctext('%-6d' % count, 'cyan'),
'%s(spk)' % ctext('%-4d' % len(DS_SPK[name]), 'cyan'))
# ===========================================================================
# PATH HELPER
# ===========================================================================
def get_model_path(system_name, logging=True):
"""
Parameters
----------
args_name : list of string
    list of parsed-argument names taken into account when creating the
    model name
Return
------
exp_dir, model_path, log_path
"""
args_name = []
if system_name == 'xvec':
args_name += ['utt', 'seq']
elif system_name == 'ivec':
args_name += ['nmix', 'tdim']
else:
raise ValueError("No support for system with name: %s" % system_name)
args_name += ['mindur', 'minutt']
# ====== base system and feature identity ====== #
name = str(system_name).lower()
name += '_' + FEATURE_RECIPE.replace('_', '')
name += '.' + FEATURE_NAME
# ====== concat the attributes ====== #
attributes = []
for i in [str(i) for i in args_name]:
attributes.append(str(getattr(_args, i)))
attributes = '_'.join(attributes)
name += '.' + attributes
# ====== check the exclude dataset ====== #
excluded_dataset = str(_args.exclude).strip()
if len(excluded_dataset) > 0:
dataset_str = []
for excluded in sorted(set(excluded_dataset.split(','))):
assert excluded in sre_file_list or excluded == 'noise', \
"Unknown excluded dataset with name: '%s'" % excluded
dataset_str.append(excluded)
dataset_str = '_'.join(dataset_str)
name += '.' + dataset_str
# ====== check save_path ====== #
save_path = os.path.join(EXP_DIR, name)
if os.path.exists(save_path) and IS_OVERRIDE:
print("Override path:", ctext(save_path, 'yellow'))
shutil.rmtree(save_path)
if not os.path.exists(save_path):
os.mkdir(save_path)
# ====== return path ====== #
log_path = get_logpath(name='log.txt', increasing=True,
odin_base=False, root=save_path)
model_path = os.path.join(save_path, 'model.ai')
if bool(logging):
print("Model path:", ctext(model_path, 'cyan'))
print("Log path:", ctext(log_path, 'cyan'))
return save_path, model_path, log_path
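# Hedged example: for system_name='xvec' with a hypothetical FEATURE_RECIPE of
# 'mfcc', FEATURE_NAME='mfcc' and the default arguments (-utt 3, -seq cut,
# -mindur 1, -minutt 3, no excluded dataset), the experiment folder becomes
# EXP_DIR/'xvec_mfcc.mfcc.3_cut_1_3', containing 'model.ai' and 'log.txt'.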
# ===========================================================================
# Data helper
# ===========================================================================
def prepare_dnn_feeder_recipe(name2label=None, n_speakers=None,
utt_length=None, seq_mode=None):
if utt_length is None:
utt_length = float(_args.utt)
if seq_mode is None:
seq_mode = str(_args.seq).strip().lower()
frame_length = int(utt_length / Config.STEP_LENGTH)
if seq_mode == 'cut':
seq_train = 'cut'
seq_score = 'mix'
elif seq_mode == 'pad':
seq_train = 'pad'
seq_score = 'pad'
else:
raise ValueError("Only support 'cut' or 'pad' sequencing mode")
recipes = [
F.recipes.Sequencing(frame_length=frame_length,
step_length=frame_length,
end=seq_score if CURRENT_STATE == SystemStates.SCORING
else seq_train,
pad_value=0, pad_mode='post',
data_idx=0),
]
if name2label is not None and n_speakers is not None:
recipes += [
F.recipes.Name2Label(lambda name:name2label[name],
ref_idx=0),
F.recipes.LabelOneHot(nb_classes=n_speakers, data_idx=1)
]
elif (name2label is not None and n_speakers is None) or\
(name2label is None and n_speakers is not None):
raise RuntimeError("name2label and n_speakers must both be None, or not-None")
return recipes
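# Hedged worked example for the recipe above: with the default -utt 3 (seconds)
# and Config.STEP_LENGTH = 0.01, frame_length = int(3 / 0.01) = 300 frames per
# training segment; 'cut' truncates longer sequences while 'pad' zero-pads
# shorter ones ('post' padding, value 0).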
def filter_utterances(X, indices, spkid,
min_dur=None, min_utt=None,
remove_min_length=True, remove_min_uttspk=True,
n_speakers=None, ncpu=None, save_path=None,
title=''):
"""
X : 2-D matrix
input features
indices : Mapping
utterance_name -> (start, end) in `X`
spkid : Mapping
utterance_name -> speaker_id
remove_min_length : bool (default: True)
if True, remove all files shorter than MINIMUM_UTT_DURATION
remove_min_uttspk : bool (default: True)
    if True, remove all speakers with fewer utterances than
MINIMUM_UTT_PER_SPEAKERS
n_speakers : {None, int} (default: None)
if given, downsample the dataset by given number of speakers
save_path : {None, str} (default: None)
if given, pickle all filtered files to disk
"""
if min_dur is None:
min_dur = MINIMUM_UTT_DURATION
if min_utt is None:
min_utt = MINIMUM_UTT_PER_SPEAKERS
minimum_amount_of_frames = min_dur / Config.STEP_LENGTH
save_data = {}
prog = Progbar(target=len(indices),
print_report=True, print_summary=True,
name='Filtering broken utterances: %s' % title)
prog.set_summarizer('zero-length', fn=lambda x: x[-1])
prog.set_summarizer('min-frames', fn=lambda x: x[-1])
prog.set_summarizer('zero-var', fn=lambda x: x[-1])
prog.set_summarizer('small-var', fn=lambda x: x[-1])
prog.set_summarizer('overflow', fn=lambda x: x[-1])
# ====== mpi function for checking ====== #
@nb.jit(nopython=True, nogil=True)
def _fast_mean_var_ax0(z):
# using this function for calculating mean and variance
# can double the speed but cannot check overflow,
# only accept float32 or float64 input
s1 = np.zeros(shape=(z.shape[1],), dtype=z.dtype)
s2 = np.zeros(shape=(z.shape[1],), dtype=z.dtype)
for i in range(z.shape[0]):
s1 += z[i]
s2 += np.power(z[i], 2)
mean = s1 / z.shape[0]
var = s2 / z.shape[0] - np.power(mean, 2)
return mean, var
def _mpi_func(jobs):
for name, (start, end) in jobs:
y = X[start:end]
# flags
is_zero_len = False
is_zero_var = False
is_small_var = False
is_min_frames = False
is_overflow = False
# checking length
if y.shape[0] == 0:
is_zero_len = True
elif y.shape[0] < minimum_amount_of_frames:
is_min_frames = True
# checking statistics
else:
with catch_warnings_error(RuntimeWarning):
try:
# mean = np.mean(y, axis=-1)
var = np.var(y, axis=-1)
# min_val = np.min(y, axis=-1)
# max_val = np.max(y, axis=-1)
# numerical unstable
except RuntimeWarning as w:
if 'overflow encountered' in str(w):
is_overflow = True
else:
print(name, ':', w)
# process with more numerical filtering
else:
if np.any(np.isclose(var, 0)):
is_zero_var = True
# very heuristic and aggressive here
# filter-out anything with ~16.67% of low-var
# this could remove 1/3 of the original data
if np.sum(var < 0.01) > (len(y) / 6):
is_small_var = True
# return the flags
yield (name, is_zero_len, is_min_frames,
is_zero_var, is_small_var,
is_overflow)
# ====== running the multiprocessing filter ====== #
zero_len_files = {}
min_frame_files = {}
zero_var_files = {}
small_var_files = {}
overflow_files = {}
for res in mpi.MPI(jobs=sorted(indices.items(),
key=lambda x: x[1][0]),
func=_mpi_func,
ncpu=NCPU if ncpu is None else int(ncpu),
batch=250):
name = res[0]
if res[1]: zero_len_files[name] = 1
if res[2]: min_frame_files[name] = 1
if res[3]: zero_var_files[name] = 1
if res[4]: small_var_files[name] = 1
if res[5]: overflow_files[name] = 1
# update progress
prog['name'] = name[:48]
prog['zero-length'] = len(zero_len_files)
prog['min-frames'] = len(min_frame_files)
prog['zero-var'] = len(zero_var_files)
prog['small-var'] = len(small_var_files)
prog['overflow'] = len(overflow_files)
prog.add(1)
# ====== remove broken files ====== #
if not bool(remove_min_length):
min_frame_files = {}
new_indices = {name: (start, end)
for name, (start, end) in indices.items()
if name not in zero_len_files and
name not in min_frame_files and
name not in zero_var_files and
name not in small_var_files and
name not in overflow_files}
print("Filtered #utterances: %s/%s (files)" %
(ctext(len(indices) - len(new_indices), 'lightcyan'),
ctext(len(indices), 'cyan')))
indices = new_indices
# ====== store save data ====== #
save_data['zero_len'] = zero_len_files
save_data['min_dur'] = min_frame_files
save_data['zero_var'] = zero_var_files
save_data['small_var'] = small_var_files
save_data['overflow'] = overflow_files
# ====== filter-out by number of utt-per-speaker ====== #
if bool(remove_min_uttspk):
spk2utt = defaultdict(list)
for name in indices.keys():
spk2utt[spkid[name]].append(name)
n_utt_removed = 0
n_spk_removed = 0
removed_utt = []
keep_utt = []
for spk, utt in spk2utt.items():
if len(utt) < min_utt:
n_utt_removed += len(utt)
n_spk_removed += 1
removed_utt += utt
else:
keep_utt += utt
removed_utt = set(removed_utt)
keep_utt = set(keep_utt)
save_data['min_utt'] = removed_utt
print("Removed min-utt/spk: %s/%s(utt) %s/%s(spk)" % (
ctext(n_utt_removed, 'lightcyan'), ctext(len(indices), 'cyan'),
ctext(n_spk_removed, 'lightcyan'), ctext(len(spk2utt), 'cyan')
))
assert len(indices) == n_utt_removed + len(keep_utt), "Not possible!"
indices = {name: (start, end)
for name, (start, end) in indices.items()
if name in keep_utt}
# ====== sample by number of speakers ====== #
if isinstance(n_speakers, Number) and n_speakers > 0:
spk2utt = defaultdict(list)
for name, (start, end) in indices.items():
spk2utt[spkid[name]].append((name, (start, end)))
n_org_spk = len(spk2utt)
n_org_ids = len(indices)
# only need down-sampling with smaller number of speaker
if n_speakers < n_org_spk:
rand = np.random.RandomState(seed=Config.SUPER_SEED)
tmp = list(spk2utt.keys())
rand.shuffle(tmp)
sampled_spk = tmp[:n_speakers]
indices = []
for spk in sampled_spk:
indices += spk2utt[spk]
indices = dict(indices)
else:
sampled_spk = spk2utt
# print some log
print("Selected: %s/%s(spk) which have %s/%s(utt)" % (
ctext(len(sampled_spk), 'lightcyan'), ctext(n_org_spk, 'cyan'),
ctext(len(indices), 'lightcyan'), ctext(n_org_ids, 'cyan')
))
# ====== return the new indices ====== #
if save_path is not None:
try:
with open(save_path, 'wb') as save_file:
pickle.dump(save_data, save_file)
except Exception as e:
print("Cannot save filtering data to path: '%s', error: '%s'" %
(save_path, str(e)))
return indices
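# Hedged worked example for the length filter above: with the default -mindur 1
# (second) and Config.STEP_LENGTH = 0.01, minimum_amount_of_frames = 1 / 0.01
# = 100, so any utterance shorter than 100 feature frames is flagged as
# 'min-frames' and removed (unless remove_min_length=False).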
def prepare_dnn_data(save_dir, feat_name=None,
utt_length=None, seq_mode=None,
min_dur=None, min_utt=None,
exclude=None, train_proportion=None,
return_dataset=False):
assert os.path.isdir(save_dir), \
"Path to '%s' is not a directory" % save_dir
if feat_name is None:
feat_name = FEATURE_NAME
if utt_length is None:
utt_length = int(_args.utt)
if seq_mode is None:
seq_mode = str(_args.seq).strip().lower()
if min_dur is None:
min_dur = MINIMUM_UTT_DURATION
if min_utt is None:
min_utt = MINIMUM_UTT_PER_SPEAKERS
if exclude is None:
exclude = str(_args.exclude).strip()
print("Minimum duration: %s(s)" % ctext(min_dur, 'cyan'))
print("Minimum utt/spk : %s(utt)" % ctext(min_utt, 'cyan'))
# ******************** prepare dataset ******************** #
path = os.path.join(PATH_ACOUSTIC_FEATURES, FEATURE_RECIPE)
assert os.path.exists(path), "Cannot find acoustic dataset at path: %s" % path
ds = F.Dataset(path=path, read_only=True)
rand = np.random.RandomState(seed=Config.SUPER_SEED)
# ====== find the right feature ====== #
assert feat_name in ds, "Cannot find feature with name: %s" % feat_name
X = ds[feat_name]
ids_name = 'indices_%s' % feat_name
assert ids_name in ds, "Cannot find indices with name: %s" % ids_name
# ====== basic path ====== #
path_filtered_data = os.path.join(save_dir, 'filtered_files.pkl')
path_train_files = os.path.join(save_dir, 'train_files.pkl')
path_speaker_info = os.path.join(save_dir, 'speaker_info.pkl')
# ******************** cannot find cached data ******************** #
if any(not os.path.exists(p) for p in [path_filtered_data,
path_train_files,
path_speaker_info]):
# ====== exclude some dataset ====== #
if len(exclude) > 0:
exclude_dataset = {i: 1 for i in exclude.split(',')}
print("* Excluded dataset:", ctext(exclude_dataset, 'cyan'))
indices = {name: (start, end)
for name, (start, end) in ds[ids_name].items()
if ds['dsname'][name] not in exclude_dataset}
# special case exclude all the noise data
if 'noise' in exclude_dataset:
indices = {name: (start, end)
for name, (start, end) in indices.items()
if '/' not in name}
else:
indices = {i: j for i, j in ds[ids_name].items()}
# ====== down-sampling if necessary ====== #
if _args.downsample > 1000:
dataset2name = defaultdict(list)
# ordering the indices so we sample the same set every time
for name in sorted(indices.keys()):
dataset2name[ds['dsname'][name]].append(name)
n_total_files = len(indices)
n_sample_files = int(_args.downsample)
# get the percentage of each dataset
dataset2per = {i: len(j) / n_total_files
for i, j in dataset2name.items()}
# sampling based on percentage
_ = {}
for dsname, flist in dataset2name.items():
rand.shuffle(flist)
n_dataset_files = int(dataset2per[dsname] * n_sample_files)
_.update({i: indices[i]
for i in flist[:n_dataset_files]})
indices = _
# ====== * filter out "bad" sample ====== #
indices = filter_utterances(X=X, indices=indices, spkid=ds['spkid'],
min_utt=min_utt, min_dur=min_dur,
remove_min_length=True,
remove_min_uttspk=True,
n_speakers=None, ncpu=None,
save_path=path_filtered_data)
# ====== all training file name ====== #
# modify here to train full dataset
all_name = sorted(indices.keys())
rand.shuffle(all_name); rand.shuffle(all_name)
n_files = len(all_name)
print("#Files:", ctext(n_files, 'cyan'))
# ====== speaker mapping ====== #
name2spk = {name: ds['spkid'][name]
for name in all_name}
all_speakers = sorted(set(name2spk.values()))
spk2label = {spk: i
for i, spk in enumerate(all_speakers)}
name2label = {name: spk2label[spk]
for name, spk in name2spk.items()}
assert len(name2label) == len(all_name)
print("#Speakers:", ctext(len(all_speakers), 'cyan'))
# ====== stratify sampling based on speaker ====== #
valid_name = []
# create speakers' cluster
label2name = defaultdict(list)
for name, label in sorted(name2label.items(),
key=lambda x: x[0]):
label2name[label].append(name)
# for each speaker with >= 3 utterance
for label, name_list in sorted(label2name.items(),
key=lambda x: x[0]):
if len(name_list) < 3:
continue
n = max(1, int(0.05 * len(name_list))) # 5% for validation
valid_name += rand.choice(a=name_list, size=n, replace=False).tolist()
# train list is the rest
_ = set(valid_name)
train_name = [i for i in all_name if i not in _]
# ====== split training and validation ====== #
train_indices = {name: indices[name] for name in train_name}
valid_indices = {name: indices[name] for name in valid_name}
# ====== save cached data ====== #
with open(path_train_files, 'wb') as fout:
pickle.dump({'train': train_indices, 'valid': valid_indices},
fout)
with open(path_speaker_info, 'wb') as fout:
pickle.dump({'all_speakers': all_speakers,
'name2label': name2label,
'spk2label': spk2label},
fout)
# ******************** load cached data ******************** #
else:
with open(path_train_files, 'rb') as fin:
obj = pickle.load(fin)
train_indices = obj['train']
valid_indices = obj['valid']
with open(path_speaker_info, 'rb') as fin:
obj = pickle.load(fin)
all_speakers = obj['all_speakers']
name2label = obj['name2label']
spk2label = obj['spk2label']
# ******************** print log ******************** #
def summary_indices(ids):
datasets = defaultdict(int)
speakers = defaultdict(list)
text = ''
for name in sorted(ids.keys()):
text += name + str(ids[name])
dsname = ds['dsname'][name]
datasets[dsname] += 1
speakers[dsname].append(ds['spkid'][name])
for dsname in sorted(datasets.keys()):
print(' %-18s: %s(utt) %s(spk)' % (
dsname,
ctext('%6d' % datasets[dsname], 'cyan'),
ctext(len(set(speakers[dsname])), 'cyan')))
print(' MD5 checksum:', ctext(crypto.md5_checksum(text), 'lightcyan'))
# ====== training files ====== #
print("#Train files:", ctext('%-8d' % len(train_indices), 'cyan'),
"#spk:", ctext(len(set(name2label[name]
for name in train_indices.keys())), 'cyan'),
"#noise:", ctext(len([name for name in train_indices.keys()
if '/' in name]), 'cyan'))
summary_indices(ids=train_indices)
# ====== valid files ====== #
print("#Valid files:", ctext('%-8d' % len(valid_indices), 'cyan'),
"#spk:", ctext(len(set(name2label[name]
for name in valid_indices.keys())), 'cyan'),
"#noise:", ctext(len([name for name in valid_indices.keys()
if '/' in name]), 'cyan'))
summary_indices(ids=valid_indices)
# ******************** create the recipe ******************** #
assert all(name in name2label
for name in train_indices.keys())
assert all(name in name2label
for name in valid_indices.keys())
recipes = prepare_dnn_feeder_recipe(name2label=name2label,
n_speakers=len(all_speakers),
utt_length=utt_length, seq_mode=seq_mode)
# ====== downsample training set for analyzing if required ====== #
if train_proportion is not None:
assert 0 < train_proportion < 1
n_training = len(train_indices)
train_indices = list(train_indices.items())
rand.shuffle(train_indices); rand.shuffle(train_indices)
train_indices = dict(train_indices[:int(n_training * train_proportion)])
# ====== create feeder ====== #
train_feeder = F.Feeder(
data_desc=F.IndexedData(data=X,
indices=train_indices),
batch_mode='batch', ncpu=NCPU, buffer_size=256)
valid_feeder = F.Feeder(
data_desc=F.IndexedData(data=X,
indices=valid_indices),
batch_mode='batch', ncpu=max(2, NCPU // 4), buffer_size=64)
train_feeder.set_recipes(recipes)
valid_feeder.set_recipes(recipes)
print(train_feeder)
print(valid_feeder)
# ====== debugging ====== #
if IS_DEBUGGING:
import matplotlib
matplotlib.use('Agg')
prog = Progbar(target=len(valid_feeder), print_summary=True,
name="Iterating validation set")
samples = []
n_visual = 250
for name, idx, X, y in valid_feeder.set_batch(batch_size=100000,
batch_mode='file',
seed=None, shuffle_level=0):
assert idx == 0, "Utterances longer than %.2f(sec)" % (100000 * Config.STEP_LENGTH)
prog['X'] = X.shape
prog['y'] = y.shape
prog.add(X.shape[0])
# random sampling
if rand.rand(1) < 0.5 and len(samples) < n_visual:
for i in rand.randint(0, X.shape[0], size=4, dtype='int32'):
samples.append((name, X[i], np.argmax(y[i], axis=-1)))
# plot the spectrogram
n_visual = len(samples)
V.plot_figure(nrow=n_visual, ncol=8)
for i, (name, X, y) in enumerate(samples):
is_noise = '/' in name
assert name2label[name] == y, "Speaker label mismatch for file: %s" % name
name = name.split('/')[0]
dsname = ds['dsname'][name]
spkid = ds['spkid'][name]
      # `y` is already the label index (argmax was applied when sampling above)
ax = V.plot_spectrogram(X.T,
ax=(n_visual, 1, i + 1),
title='#%d' % (i + 1))
ax.set_title('[%s][%s]%s %s' %
('noise' if is_noise else 'clean', dsname, name, spkid),
fontsize=6)
    # doesn't need to be high resolution
V.plot_save('/tmp/tmp.pdf', dpi=12)
exit()
# ====== return ====== #
if bool(return_dataset):
return train_feeder, valid_feeder, all_speakers, ds
return train_feeder, valid_feeder, all_speakers
# ===========================================================================
# Evaluation and validation helper
# ===========================================================================
def validate_features_dataset(output_dataset_path, ds_validation_path):
ds = F.Dataset(output_dataset_path, read_only=True)
print(ds)
features = {}
for key, val in ds.items():
if 'indices_' in key:
name = key.split('_')[-1]
features[name] = (val, ds[name])
all_indices = [val[0] for val in features.values()]
# ====== sampling 250 files ====== #
all_files = sampling_iter(it=all_indices[0].keys(), k=250,
seed=Config.SUPER_SEED)
all_files = [f for f in all_files
if all(f in ids for ids in all_indices)]
print("#Samples:", ctext(len(all_files), 'cyan'))
# ====== ignore the 20-figures warning ====== #
with catch_warnings_ignore(RuntimeWarning):
for file_name in all_files:
X = {}
for feat_name, (ids, data) in features.items():
start, end = ids[file_name]
X[feat_name] = data[start:end][:].astype('float32')
V.plot_multiple_features(features=X, fig_width=20,
title='[%s]%s' % (ds['dsname'][file_name], file_name))
V.plot_save(ds_validation_path, dpi=12)
| {
"repo_name": "imito/odin",
"path": "examples/nist_sre/helpers.py",
"copies": "1",
"size": "40884",
"license": "mit",
"hash": 722396515581426400,
"line_mean": 38.8868292683,
"line_max": 105,
"alpha_frac": 0.5609773995,
"autogenerated": false,
"ratio": 3.407567927987998,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.44685453274879977,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import os
import pickle
import numpy as np
from bigarray import MmapArray, MmapArrayWriter
from odin.ml.base import BaseEstimator, DensityMixin, TransformerMixin
from odin.ml.gmm_tmat import GMM, Tmatrix, _split_jobs
from odin.utils import (Progbar, UnitTimer, batching, crypto, ctext,
is_primitives, mpi, uuid)
# ===========================================================================
# Helper
# ===========================================================================
def _extract_zero_and_first_stats(X, sad, indices, gmm, z_path, f_path,
name_path):
n_samples = X.shape[0]
  # if indices is None, every row is a single sample (utterance, image, ...)
if indices is None:
if os.path.exists(z_path):
os.remove(z_path)
if os.path.exists(f_path):
os.remove(f_path)
Z = MmapArrayWriter(path=z_path,
dtype='float32',
shape=(n_samples, gmm.nmix),
remove_exist=True)
F = MmapArrayWriter(path=f_path,
dtype='float32',
shape=(n_samples, gmm.feat_dim * gmm.nmix),
remove_exist=True)
jobs, _ = _split_jobs(n_samples,
ncpu=mpi.cpu_count(),
device='cpu',
gpu_factor=1)
def map_transform(start_end):
start, end = start_end
for i in range(start, end):
# removed by SAD
if sad is not None and not bool(sad[i]):
yield None, None, None
else:
z, f = gmm.transform(X[i][np.newaxis, :],
zero=True,
first=True,
device='cpu')
yield i, z, f
prog = Progbar(target=n_samples,
print_report=True,
print_summary=False,
name="Extracting zero and first order statistics")
for i, z, f in mpi.MPI(jobs, map_transform, ncpu=None, batch=1):
if i is not None: # i None means removed by SAD
Z[i] = z
F[i] = f
prog.add(1)
Z.flush()
F.flush()
Z.close()
F.close()
# use directly the transform_to_disk function
else:
gmm.transform_to_disk(X,
indices=indices,
sad=sad,
pathZ=z_path,
pathF=f_path,
name_path=name_path,
dtype='float32',
device=None,
ncpu=None,
override=True)
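# A minimal usage sketch of the helper above (the fitted `gmm` object and the
# output paths are hypothetical):
#   _extract_zero_and_first_stats(X=X, sad=None, indices=None, gmm=gmm,
#                                 z_path='/tmp/zstat', f_path='/tmp/fstat',
#                                 name_path='/tmp/names')
# With indices=None every row of X is treated as one utterance, so the
# memory-mapped outputs have shapes Z=(n_samples, gmm.nmix) and
# F=(n_samples, gmm.feat_dim * gmm.nmix); with an `indices` mapping the work
# is delegated to `gmm.transform_to_disk` instead.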
# ===========================================================================
# Fast combined GMM-Tmatrix training for I-vector extraction
# ===========================================================================
class Ivector(DensityMixin, BaseEstimator, TransformerMixin):
""" Ivector extraction using GMM and T-matrix """
def __init__(self,
path,
nmix=None,
tv_dim=None,
nmix_start=1,
niter_gmm=16,
niter_tmat=16,
allow_rollback=True,
exit_on_error=False,
downsample=1,
stochastic_downsample=True,
device='gpu',
ncpu=1,
gpu_factor_gmm=80,
gpu_factor_tmat=3,
dtype='float32',
seed=1234,
name=None):
super(Ivector, self).__init__()
# ====== auto store arguments ====== #
for key, val in locals().items():
if key in ('self', 'path', 'seed'):
continue
setattr(self, key, val)
# ====== create random generator ====== #
self._rand = np.random.RandomState(seed=seed)
# ====== check path ====== #
path = str(path)
if not os.path.exists(path):
os.mkdir(path)
elif not os.path.isdir(path):
raise ValueError("Path to '%s' is not a directory" % str(path))
self._path = path
self._gmm = None
self._tmat = None
# ==================== properties ==================== #
@property
def gmm(self):
if self._gmm is None:
if os.path.exists(self.gmm_path):
with open(self.gmm_path, 'rb') as f:
self._gmm = pickle.load(f)
assert self._gmm.nmix == self.nmix, \
"Require GMM with %d components, but found %s, at path: '%s'" % \
(self.nmix, str(self._gmm), self.gmm_path)
else:
self._gmm = GMM(nmix=self.nmix,
niter=self.niter_gmm,
dtype=self.dtype,
downsample=self.downsample,
stochastic_downsample=self.stochastic_downsample,
device=self.device,
ncpu=self.ncpu,
gpu_factor=self.gpu_factor_gmm,
seed=1234,
path=self.gmm_path,
name="IvecGMM_%s" %
(self.name if self.name is not None else str(
self._rand.randint(10e8))))
return self._gmm
@property
def tmat(self):
if self._tmat is None:
if os.path.exists(self.tmat_path):
with open(self.tmat_path, 'rb') as f:
self._tmat = pickle.load(f)
assert self._tmat.tv_dim == self.tv_dim, \
"Require T-matrix with %d dimensions, but found %s, at path: '%s'" % \
(self.tv_dim, str(self._tmat), self.tmat_path)
else:
self._tmat = Tmatrix(tv_dim=self.tv_dim,
gmm=self.gmm,
niter=self.niter_tmat,
dtype=self.dtype,
device=self.device,
ncpu=self.ncpu,
gpu_factor=self.gpu_factor_tmat,
cache_path='/tmp',
seed=1234,
path=self.tmat_path,
name='IvecTmat_%s' %
(self.name if self.name is not None else str(
self._rand.randint(10e8))))
return self._tmat
@property
def path(self):
return self._path
@property
def gmm_path(self):
return os.path.join(self.path, 'gmm.pkl')
@property
def tmat_path(self):
return os.path.join(self.path, 'tmat.pkl')
@property
def z_path(self):
""" Path to zero-th order statistics of the training data"""
return os.path.join(self.path, 'zstat_train')
@property
def f_path(self):
""" Path to first order statistics of the training data """
return os.path.join(self.path, 'fstat_train')
@property
def ivec_path(self):
""" Path to first order statistics of the training data """
return os.path.join(self.path, 'ivec_train')
@property
def name_path(self):
""" In case indices is given during training, the order of
    processed files is stored at this path """
return os.path.join(self.path, 'name_train')
@property
def feat_dim(self):
return self.gmm.feat_dim
# ==================== state query ==================== #
@property
def is_gmm_fitted(self):
return self.gmm.is_fitted
@property
def is_tmat_fitted(self):
return self.is_gmm_fitted and self.tmat.is_fitted
@property
def is_fitted(self):
return self.is_gmm_fitted and self.is_tmat_fitted
# ====== getter ====== #
def get_z_path(self, name=None):
""" Return the path the zero-order statistics
according to the given name as identification during
`Ivector.transform`
If name is None, return `Ivector.z_path`
"""
if name is None:
return self.z_path
return os.path.join(self.path, 'zstat_%s' % name)
def get_f_path(self, name):
""" Return the path the first-order statistics
according to the given name as identification during
`Ivector.transform`
If name is None, return `Ivector.f_path`
"""
if name is None:
return self.f_path
return os.path.join(self.path, 'fstat_%s' % name)
def get_i_path(self, name):
""" Return the path the extracted i-vectors
according to the given name as identification during
`Ivector.transform`
If name is None, return `Ivector.ivec_path`
"""
if name is None:
return self.ivec_path
return os.path.join(self.path, 'ivec_%s' % name)
def get_name_path(self, name):
""" Return the path of the name list if indices is used
according to the given name as identification during
`Ivector.transform`
If name is None, return `Ivector.name_path`
"""
if name is None:
return self.name_path
return os.path.join(self.path, 'name_%s' % name)
# ==================== sklearn methods ==================== #
def fit(self,
X,
indices=None,
sad=None,
refit_gmm=False,
refit_tmat=False,
extract_ivecs=False,
keep_stats=False):
"""
Parameters
----------
X : ndarray
Training data [n_samples, n_features]
indices : {Mapping, tuple, list}
      in case the data is given by a list of files, `indices`
      acts as a file indicator, mapping
      'file_name' -> (start_index_in_X, end_index_in_X).
      This mapping can be provided as a dictionary or a list of
      tuples.
Note: the order provided in indices will be preserved
sad : ndarray
inspired by the "Speech Activity Detection" (SAD) indexing,
      this array is an indicator of which samples will be taken into
training; the shape should be [n_samples,] or [n_samples, 1]
refit_gmm : bool
if True, re-fit the GMM even though it is fitted,
consequently, the T-matrix will be re-fitted
refit_tmat : bool
if True, re-fit the T-matrix even though it is fitted
extract_ivecs : bool
if True, extract the i-vector for training data
keep_stats : bool
if True, keep the zero and first order statistics.
      The first order statistics can consume a huge amount
      of disk space. Otherwise, they are deleted after training.
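    Examples
    --------
    A minimal usage sketch; the array shapes, utterance names and model path
    below are illustrative only:
    >>> ivec = Ivector(path='/tmp/ivec_model', nmix=256, tv_dim=100)
    >>> X = np.random.rand(25000, 39)
    >>> indices = {'utt_%d' % i: (i * 2500, (i + 1) * 2500)
    ...            for i in range(10)}
    >>> ivec.fit(X, indices=indices, extract_ivecs=True)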
"""
new_gmm = (not self.gmm.is_fitted or refit_gmm)
# ====== clean error files ====== #
if os.path.exists(self.z_path):
Z = MmapArray(self.z_path)
if Z.shape[0] == 0: # empty file
os.remove(self.z_path)
Z.close()
if os.path.exists(self.f_path):
F = MmapArray(self.f_path)
if F.shape[0] == 0: # empty file
os.remove(self.f_path)
F.close()
if os.path.exists(self.ivec_path):
ivec = MmapArray(self.ivec_path)
if ivec.shape[0] == 0: # empty file
os.remove(self.ivec_path)
ivec.close()
# ====== Training the GMM first ====== #
if new_gmm:
input_data = [X]
if sad is not None:
input_data.append(sad)
if indices is not None:
input_data.append(indices)
self.gmm.fit(input_data)
# ====== some fun, and confusing logics ====== #
# GMM need to be fitted before creating T-matrix model
new_tmat = (not self.tmat.is_fitted or new_gmm or refit_tmat)
    # New I-vector is needed when:
# - only when `extract_ivecs=True`
# - and new T-matrix is trained but no I-vector is extracted
new_ivec = extract_ivecs and \
(new_tmat or not os.path.exists(self.ivec_path))
    # new stats are only needed when:
    # - GMM is updated
    # - training a new Tmatrix and Z and F do not exist
    # - extracting new I-vectors and Z and F do not exist
if not new_gmm and \
(os.path.exists(self.z_path) and os.path.exists(self.f_path)):
new_stats = False
else:
new_stats = new_gmm or new_tmat or new_ivec
# ====== extract the statistics ====== #
if new_stats:
_extract_zero_and_first_stats(X=X,
sad=sad,
indices=indices,
gmm=self.gmm,
z_path=self.z_path,
f_path=self.f_path,
name_path=self.name_path)
# ====== Training the T-matrix and extract i-vector ====== #
if new_tmat or new_ivec:
Z = MmapArray(path=self.z_path)
F = MmapArray(path=self.f_path)
if new_tmat:
self.tmat.fit((Z, F))
if new_ivec:
self.tmat.transform_to_disk(path=self.ivec_path,
Z=Z,
F=F,
dtype='float32',
device='gpu',
override=True)
Z.close()
F.close()
# ====== clean ====== #
if not keep_stats:
if os.path.exists(self.z_path):
os.remove(self.z_path)
if os.path.exists(self.f_path):
os.remove(self.f_path)
return self
def transform(self,
X,
indices=None,
sad=None,
save_ivecs=False,
keep_stats=False,
name=None):
"""
Parameters
----------
X : ndarray
Training data [n_samples, n_features]
indices : {Mapping, tuple, list}
      in case the data is given by a list of files, `indices`
      acts as a file indicator, mapping
      'file_name' -> (start_index_in_X, end_index_in_X).
      This mapping can be provided as a dictionary or a list of
      tuples.
sad : ndarray
inspired by the "Speech Activity Detection" (SAD) indexing,
      this array is an indicator of which samples will be taken into
training; the shape should be [n_samples,] or [n_samples, 1]
save_ivecs : bool
if True, save extracted i-vectors to disk at path `ivec_[name]`
if False, return directly the i-vectors without saving
keep_stats : bool
if True, keep the zero and first order statistics.
      The first order statistics can consume a huge amount
      of disk space. Otherwise, they are deleted after training.
name : {None, str}
      identity of the i-vectors (for re-use in the future).
If None, a random name is used
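    Examples
    --------
    A minimal usage sketch (the shapes, names and the already fitted `ivec`
    instance are illustrative only):
    >>> X_test = np.random.rand(5000, 39)
    >>> indices_test = {'utt_a': (0, 2500), 'utt_b': (2500, 5000)}
    >>> ivecs = ivec.transform(X_test, indices=indices_test,
    ...                        save_ivecs=True, name='test_set')
    >>> # `ivecs` is an array of shape (n_files, tv_dim)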
"""
if not self.is_fitted:
raise ValueError(
"Ivector has not been fitted, call Ivector.fit(...) first")
n_files = X.shape[0] if indices is None else len(indices)
if name is None:
name = uuid(length=8)
else:
name = str(name)
# ====== init ====== #
z_path = self.get_z_path(name)
f_path = self.get_f_path(name)
if save_ivecs:
i_path = self.get_i_path(name)
else:
i_path = None
name_path = self.get_name_path(name)
# ====== check exist i-vector file ====== #
if i_path is not None and os.path.exists(i_path):
ivec = MmapArray(path=i_path)
assert ivec.shape[0] == n_files and ivec.shape[1] == self.tv_dim,\
"Need i-vectors for %d files, found exists data at path:'%s' with shape:%s" % \
(n_files, i_path, ivec.shape)
return ivec
# ====== extract Z and F ====== #
if os.path.exists(z_path) and os.path.exists(f_path):
pass
else:
if os.path.exists(z_path):
os.remove(z_path)
if os.path.exists(f_path):
os.remove(f_path)
if os.path.exists(name_path):
os.remove(name_path)
_extract_zero_and_first_stats(X=X,
sad=sad,
indices=indices,
gmm=self.gmm,
z_path=z_path,
f_path=f_path,
name_path=name_path)
Z = MmapArray(path=z_path)
F = MmapArray(path=f_path)
# ====== extract I-vec ====== #
ivec = self.tmat.transform_to_disk(path=i_path, Z=Z, F=F, dtype='float32')
# ====== clean ====== #
Z.close()
F.close()
if not keep_stats:
if os.path.exists(z_path):
os.remove(z_path)
if os.path.exists(f_path):
os.remove(f_path)
else:
print("Zero-order stats saved at:", ctext(z_path, 'cyan'))
print("First-order stats saved at:", ctext(f_path, 'cyan'))
return ivec
def __str__(self):
s = ''
s += ctext("<Ivector ", 'yellow')
s += "GMM:%s " % self.is_gmm_fitted
s += "Tmat:%s\n" % self.is_tmat_fitted
if os.path.exists(self.path) and len(os.listdir(self.path)) > 0:
# list all model files
s += " %s: " % ctext('model', 'cyan')
s += ', '.join([
'"%s"' % f
for f in sorted(os.listdir(self.path))
if 'zstat' not in f and 'fstat' not in f and 'ivec' not in f and
'name_' not in f
])
s += '\n'
# list all Zero-stats files
s += " %s: " % ctext('Z-stats', 'cyan')
s += ', '.join(
['"%s"' % f for f in sorted(os.listdir(self.path)) if 'zstat' in f])
s += '\n'
# list all First-stats files
s += " %s: " % ctext('F-stats', 'cyan')
s += ', '.join(
['"%s"' % f for f in sorted(os.listdir(self.path)) if 'fstat' in f])
s += '\n'
# list all Ivec-stats files
s += " %s: " % ctext('ivec', 'cyan')
s += ', '.join(
['"%s"' % f for f in sorted(os.listdir(self.path)) if 'ivec' in f])
s += '\n'
# list all Name path files
s += " %s: " % ctext('name-list', 'cyan')
s += ', '.join(
['"%s"' % f for f in sorted(os.listdir(self.path)) if 'name_' in f])
s += '\n'
# list all attributes
for k, v in sorted(self.__dict__.items(), key=lambda x: x[0]):
if is_primitives(v, inc_ndarray=False):
s += " %s: %s\n" % (ctext(k, 'cyan'), str(v))
s = s[:-1] + '>'
return s
| {
"repo_name": "imito/odin",
"path": "odin/ml/ivector.py",
"copies": "1",
"size": "17873",
"license": "mit",
"hash": 4750664660206398000,
"line_mean": 33.908203125,
"line_max": 85,
"alpha_frac": 0.5169809209,
"autogenerated": false,
"ratio": 3.6180161943319837,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46349971152319835,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import os
import platform
import numpy as np
from matplotlib.colors import ColorConverter
from qtpy import QtWidgets
from glue.core.message import SettingsChangeMessage
from glue.utils import nonpartial
from glue.utils.qt import load_ui, ColorProperty, get_qapp
from glue.utils.qt.widget_properties import (CurrentComboTextProperty,
ValueProperty, ButtonProperty)
from glue._settings_helpers import save_settings
__all__ = ["PreferencesDialog"]
rgb = ColorConverter().to_rgb
class PreferencesDialog(QtWidgets.QDialog):
theme = CurrentComboTextProperty('ui.combo_theme')
background = ColorProperty('ui.color_background')
foreground = ColorProperty('ui.color_foreground')
data_color = ColorProperty('ui.color_default_data')
data_alpha = ValueProperty('ui.slider_alpha', value_range=(0, 1))
data_apply = ButtonProperty('ui.checkbox_apply')
show_large_data_warning = ButtonProperty('ui.checkbox_show_large_data_warning')
save_to_disk = ButtonProperty('ui.checkbox_save')
def __init__(self, application, parent=None):
super(PreferencesDialog, self).__init__(parent=parent)
self.app = application
self.ui = load_ui('preferences.ui', self,
directory=os.path.dirname(__file__))
self.ui.cancel.clicked.connect(self.reject)
self.ui.ok.clicked.connect(self.accept)
self.ui.combo_theme.currentIndexChanged.connect(nonpartial(self._update_colors_from_theme))
# The following is needed because of a bug in Qt which means that
# tab titles don't get scaled right.
if platform.system() == 'Darwin':
app = get_qapp()
app_font = app.font()
self.ui.tab_widget.setStyleSheet('font-size: {0}px'.format(app_font.pointSize()))
from glue.config import settings
self.background = settings.BACKGROUND_COLOR
self.foreground = settings.FOREGROUND_COLOR
self.data_color = settings.DATA_COLOR
self.data_alpha = settings.DATA_ALPHA
self.show_large_data_warning = settings.SHOW_LARGE_DATA_WARNING
self._update_theme_from_colors()
self.panes = []
from glue.config import preference_panes
for label, widget_cls in sorted(preference_panes):
pane = widget_cls()
self.ui.tab_widget.addTab(pane, label)
self.panes.append(pane)
def _update_theme_from_colors(self):
if (rgb(self.background) == (1, 1, 1) and rgb(self.foreground) == (0, 0, 0)
and rgb(self.data_color) == (0.35, 0.35, 0.35) and np.allclose(self.data_alpha, 0.8)):
self.theme = 'Black on White'
elif (rgb(self.background) == (0, 0, 0) and rgb(self.foreground) == (1, 1, 1)
and rgb(self.data_color) == (0.75, 0.75, 0.75) and np.allclose(self.data_alpha, 0.8)):
self.theme = 'White on Black'
else:
self.theme = 'Custom'
def _update_colors_from_theme(self):
if self.theme == 'Black on White':
self.foreground = 'black'
self.background = 'white'
self.data_color = '0.35'
self.data_alpha = 0.8
elif self.theme == 'White on Black':
self.foreground = 'white'
self.background = 'black'
self.data_color = '0.75'
self.data_alpha = 0.8
elif self.theme != 'Custom':
raise ValueError("Unknown theme: {0}".format(self.theme))
def accept(self):
# Update default settings
from glue.config import settings
settings.FOREGROUND_COLOR = self.foreground
settings.BACKGROUND_COLOR = self.background
settings.DATA_COLOR = self.data_color
settings.DATA_ALPHA = self.data_alpha
settings.SHOW_LARGE_DATA_WARNING = self.show_large_data_warning
for pane in self.panes:
pane.finalize()
# Save to disk if requested
if self.save_to_disk:
save_settings()
# Trigger viewers to update defaults
self.app._hub.broadcast(SettingsChangeMessage(self, ('FOREGROUND_COLOR', 'BACKGROUND_COLOR')))
# If requested, trigger data to update color
if self.data_apply:
self.app.set_data_color(settings.DATA_COLOR, settings.DATA_ALPHA)
super(PreferencesDialog, self).accept()
if __name__ == "__main__":
from glue.utils.qt import get_qapp
app = get_qapp()
widget = PreferencesDialog()
widget.show()
widget.raise_()
app.exec_()
| {
"repo_name": "stscieisenhamer/glue",
"path": "glue/app/qt/preferences.py",
"copies": "1",
"size": "4644",
"license": "bsd-3-clause",
"hash": 2006968099715392000,
"line_mean": 34.1818181818,
"line_max": 102,
"alpha_frac": 0.6300602929,
"autogenerated": false,
"ratio": 3.800327332242226,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4930387625142226,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import os
import pytest
from mock import patch
from glue.tests.helpers import requires_qt
from ..core import Data
from ..main import die_on_error, load_data_files, main, start_glue
@requires_qt
def test_die_on_error_exception():
"""Decorator should spawn a QMessageBox and exit"""
with pytest.raises(SystemExit):
with patch('qtpy.QtWidgets.QMessageBox') as qmb:
@die_on_error('test_msg')
def test():
raise Exception()
test()
assert qmb.call_count == 1
def test_die_on_error_noexception():
"""Decorator should have no effect"""
@die_on_error('test_msg')
def test():
return 0
assert test() == 0
def test_load_data_files():
with patch('glue.core.data_factories.load_data') as ld:
ld.return_value = Data()
dc = load_data_files(['test.py'])
assert len(dc) == 1
def check_main(cmd, glue, config, data):
"""Pass command to main program, check for expected parsing"""
with patch('glue.main.start_glue') as sg:
main(cmd.split())
args, kwargs = sg.call_args
assert kwargs.get('datafiles', None) == data
assert kwargs.get('gluefile', None) == glue
assert kwargs.get('config', None) == config
def check_exec(cmd, pyfile):
"""Assert that main correctly dispatches to execute_script"""
with patch('glue.main.execute_script') as es:
main(cmd.split())
args, kwargs = es.call_args
assert args[0] == pyfile
def test_main_single_data():
check_main('glueqt test.fits', None, None, ['test.fits'])
def test_main_multi_data():
check_main('glueqt test.fits t2.csv', None, None, ['test.fits', 't2.csv'])
def test_main_config():
check_main('glueqt -c config.py', None, 'config.py', None)
def test_main_glu_arg():
check_main('glueqt -g test.glu', 'test.glu', None, None)
def test_main_auto_glu():
check_main('glueqt test.glu', 'test.glu', None, None)
def test_main_many_args():
check_main('glueqt -c config.py data.fits d2.csv', None,
'config.py', ['data.fits', 'd2.csv'])
def test_exec():
check_exec('glueqt -x test.py', 'test.py')
def test_auto_exec():
check_exec('glueqt test.py', 'test.py')
@requires_qt
def test_exec_real(tmpdir):
    # Actually test the script execution functionality
filename = tmpdir.join('test.py').strpath
with open(filename, 'w') as f:
f.write('a = 1')
with patch('qtpy.QtWidgets.QMessageBox') as qmb:
with patch('sys.exit') as exit:
main('glue -x {0}'.format(os.path.abspath(filename)).split())
            exit.assert_called_once_with(0)
@pytest.mark.parametrize(('cmd'), ['glueqt -g test.glu test.fits',
'glueqt -g test.py test.fits',
'glueqt -x test.py -g test.glu',
'glueqt -x test.py -c test.py',
'glueqt -x',
'glueqt -g',
'glueqt -c'])
def test_invalid(cmd):
with pytest.raises(SystemExit):
main(cmd.split())
@requires_qt
@pytest.mark.parametrize(('glue', 'config', 'data'),
[('test.glu', None, None),
(None, 'test.py', None),
(None, None, ['test.fits']),
(None, None, ['a.fits', 'b.fits']),
(None, 'test.py', ['a.fits'])])
def test_start(glue, config, data):
with patch('glue.main.restore_session') as rs:
with patch('glue.config.load_configuration') as lc:
with patch('glue.main.load_data_files') as ldf:
with patch('glue.app.qt.GlueApplication') as ga:
rs.return_value = ga
ldf.return_value = Data()
start_glue(glue, config, data)
if glue:
rs.assert_called_once_with(glue)
if config:
lc.assert_called_once_with(search_path=[config])
if data:
ldf.assert_called_once_with(data)
| {
"repo_name": "stscieisenhamer/glue",
"path": "glue/tests/test_main.py",
"copies": "2",
"size": "4262",
"license": "bsd-3-clause",
"hash": -4296110484367957500,
"line_mean": 30.1094890511,
"line_max": 78,
"alpha_frac": 0.5516189582,
"autogenerated": false,
"ratio": 3.5605680868838765,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5112187045083876,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import os
import pytest
tb = pytest.importorskip('tables')
import numpy as np
from blaze.compute.core import compute
from blaze.expr import symbol
from blaze import drop, discover, create_index
from blaze.utils import tmpfile
t = symbol('t', 'var * {id: int, name: string, amount: int}')
x = np.array([(1, 'Alice', 100),
(2, 'Bob', -200),
(3, 'Charlie', 300),
(4, 'Denis', 400),
(5, 'Edith', -500)],
dtype=[('id', '<i8'), ('name', 'S7'), ('amount', '<i8')])
@pytest.yield_fixture
def data():
with tmpfile('.h5') as filename:
f = tb.open_file(filename, mode='w')
d = f.create_table('/', 'title', x)
yield d
d.close()
f.close()
@pytest.yield_fixture
def csi_data():
with tmpfile('.h5') as filename:
f = tb.open_file(filename, mode='w')
d = f.create_table('/', 'title', x)
d.cols.amount.create_csindex()
d.cols.id.create_csindex()
yield d
d.close()
f.close()
@pytest.yield_fixture
def idx_data():
with tmpfile('.h5') as fn:
f = tb.open_file(fn, mode='w')
d = f.create_table('/', 'title', x)
d.cols.amount.create_index()
d.cols.id.create_index()
yield d
d.close()
f.close()
def eq(a, b):
return (a == b).all()
def test_discover_datashape(data):
ds = discover(data)
t = symbol('t', ds)
columns = t.fields
assert columns is not None
def test_symbol(data):
assert compute(t, data) == data
assert isinstance(data, tb.Table)
def test_single_column(data):
assert eq(compute(t['name'], data), x['name'])
def test_projection(data):
assert eq(compute(t[['name', 'amount']], data), x[['name', 'amount']])
def test_eq(data):
assert eq(compute(t['amount'] == 100, data), x['amount'] == 100)
def test_scalar_ops(data):
from operator import add, sub, mul, truediv
for op in (add, sub, mul, truediv):
assert eq(compute(op(t.amount, 10), data), op(x['amount'], 10))
assert eq(compute(op(t.amount, t.id), data), op(x['amount'], x['id']))
assert eq(compute(op(10.0, t.amount), data), op(10.0, x['amount']))
assert eq(compute(op(10, t.amount), data), op(10, x['amount']))
def test_neg(data):
assert eq(compute(-t.amount, data), -x['amount'])
def test_failing_floordiv(data):
from operator import floordiv as op
with pytest.raises(TypeError):
assert eq(compute(op(t.amount, 10), data), op(x['amount'], 10))
with pytest.raises(TypeError):
assert eq(compute(op(t.amount, t.id), data), op(x['amount'], x['id']))
with pytest.raises(TypeError):
assert eq(compute(op(10.0, t.amount), data), op(10.0, x['amount']))
with pytest.raises(TypeError):
assert eq(compute(op(10, t.amount), data), op(10, x['amount']))
def test_selection(data):
    assert eq(compute(t[t['amount'] == 100], data), x[x['amount'] == 100])
assert eq(compute(t[t['amount'] < 0], data), x[x['amount'] < 0])
def test_arithmetic(data):
assert eq(compute(t['amount'] + t['id'], data), x['amount'] + x['id'])
assert eq(compute(t['amount'] * t['id'], data), x['amount'] * x['id'])
assert eq(compute(t['amount'] % t['id'], data), x['amount'] % x['id'])
assert eq(compute(t['amount'] + t['id'] + 3, data),
x['amount'] + x['id'] + 3)
def test_reductions(data):
assert compute(t['amount'].count(), data) == len(x['amount'])
assert compute(t['amount'].sum(), data) == x['amount'].sum()
assert compute(t['amount'].mean(), data) == x['amount'].mean()
assert compute(t.amount[0], data) == x['amount'][0]
assert compute(t.amount[-1], data) == x['amount'][-1]
class TestTopLevelReductions(object):
def test_count(self, data):
from blaze import count
assert compute(count(t['amount']), data) == len(x['amount'])
def test_sum(self, data):
from blaze import sum
assert compute(sum(t['amount']), data) == x['amount'].sum()
def test_mean(self, data):
from blaze import mean
assert compute(mean(t['amount']), data) == x['amount'].mean()
class TestFailingSort(object):
"""These fail because we haven't created a completely sorted index"""
def test_basic(self, data):
with pytest.raises(ValueError):
compute(t.sort('id'), data)
@pytest.mark.xfail(
raises=TypeError,
reason='PyTables does not support multiple column sorting'
)
def test_multiple_columns(self, data):
compute(t.sort(['amount', 'id']), data)
@pytest.mark.xfail(
raises=TypeError,
reason='PyTables does not support multiple column sorting'
)
def test_multiple_columns_sorted_data(self, csi_data):
compute(t.sort(['amount', 'id']), csi_data)
class TestCSISort(object):
def test_basic(self, csi_data):
assert eq(compute(t.sort('amount'), csi_data),
np.sort(x, order='amount'))
assert eq(compute(t.sort('id'), csi_data),
np.sort(x, order='id'))
def test_column_expr(self, csi_data):
assert eq(compute(t.sort(t.amount), csi_data),
np.sort(x, order='amount'))
assert eq(compute(t.sort(t.id), csi_data),
np.sort(x, order='id'))
def test_non_existent_column(self, csi_data):
with pytest.raises(ValueError):
compute(t.sort('not here'), csi_data)
def test_ascending(self, csi_data):
assert eq(compute(t.sort('amount', ascending=False), csi_data),
np.sort(x, order='amount')[::-1])
assert eq(compute(t.sort('amount', ascending=False), csi_data),
np.sort(x, order='amount')[::-1])
class TestIndexSort(object):
"""Fails with a partially sorted index"""
@pytest.mark.xfail(
raises=ValueError,
reason='PyTables cannot sort with a standard index'
)
def test_basic(self, idx_data):
compute(t.sort('amount'), idx_data)
@pytest.mark.xfail(
raises=ValueError,
reason='PyTables cannot sort with a standard index'
)
def test_ascending(self, idx_data):
compute(t.sort('amount', ascending=False), idx_data)
def test_head(data):
assert eq(compute(t.head(2), data), x[:2])
assert eq(compute(t.amount.head(2), data), x['amount'][:2])
@pytest.yield_fixture
def pyt():
tb = pytest.importorskip('tables')
fn = 'test.pyt.h5'
f = tb.open_file(fn, mode='w')
d = f.create_table('/', 'test', x)
try:
yield d
finally:
d.close()
f.close()
try:
os.remove(fn)
except OSError:
pass
def test_drop(pyt):
drop(pyt)
with pytest.raises(tb.ClosedNodeError):
drop(pyt)
def test_create_index(pyt):
create_index(pyt, 'id')
assert 'id' in pyt.colindexes
def test_create_multiple_indexes(pyt):
create_index(pyt, ['id', 'amount'])
assert len(pyt.colindexes) == 2
assert 'id' in pyt.colindexes
assert 'amount' in pyt.colindexes
def test_create_multiple_indexes_fails(pyt):
with pytest.raises(ValueError):
create_index(pyt, ['id', 'blarg'])
with pytest.raises(ValueError):
create_index(pyt, ['foo', 'bar'])
def test_create_index_fails(pyt):
with pytest.raises(AttributeError):
create_index(pyt, 'no column here!')
def test_nrows():
assert compute(t.nrows, x) == len(x)
def test_nelements():
assert compute(t.nelements(axis=0), x) == len(x)
assert compute(t.nelements(), x) == len(x)
| {
"repo_name": "ContinuumIO/blaze",
"path": "blaze/compute/tests/test_pytables_compute.py",
"copies": "3",
"size": "7685",
"license": "bsd-3-clause",
"hash": 101404500487275490,
"line_mean": 26.9454545455,
"line_max": 78,
"alpha_frac": 0.5854261548,
"autogenerated": false,
"ratio": 3.2535986452159187,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5339024800015919,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import os
import re
import json
import time
import piexif
import requests
import argparse
from datetime import datetime
# http://bugs.python.org/issue22377
# https://docs.python.org/2/library/time.html
os.environ["TZ"] = "CST"
time.tzset()
class Cookie(object):
def __init__(self, account, password):
self.cookie_instance = self._init_cookie(account, password)
def _get_headers(self):
headers = {
"Content-Type":
"application/x-www-form-urlencoded",
"Origin":
"http//www.renren.com",
"Referer":
"http://www.renren.com/SysHome.do",
"User-Agent":
"Mozilla/5.0 (iPhone; CPU iPhone OS 7_0like Mac OS X; en-us) AppleWebKit/537.51.1 (KHTML, like Gecko) Version/7.0 Mobile/11A465 Safari/9537.53",
"X-Requested-With":
"XMLHttpRequest",
}
return headers
def _get_data(self, account, password):
data = {
"email": account,
"password": password,
"icode": "",
"origURL": "http://www.renren.com/home",
"domain": "renren.com",
"key_id": "1",
"captcha_type": "web_login"
}
return data
def _get_response(self, account, password):
kwargs = {}
kwargs["headers"] = self._get_headers()
kwargs["data"] = self._get_data(account, password)
kwargs["allow_redirects"] = True
response = requests.post("http://www.renren.com/ajaxLogin/login",
**kwargs)
assert response.status_code == 200, "status_code is {}!".format(
response.status_code)
return response.cookies, response.text
def _init_cookie(self, account, password):
cookie, text = self._get_response(account, password)
response_dict = json.loads(text)
assert response_dict[
"code"], "Failed to get the cookie because of {}!".format(
response_dict["failDescription"])
return cookie
def get_cookie(self):
return "; ".join([
"{}={}".format(key, value)
for key, value in self.cookie_instance.items()
])
def get_user_id(self):
return self.cookie_instance["id"]
class Album(object):
def __init__(self, cookie_object):
self.cookie = cookie_object.get_cookie()
self.user_id = cookie_object.get_user_id()
self.headers = self._get_headers()
self.re_pattern = re.compile("'albumList': (.*),")
def _get_headers(self):
headers = {
"Host": "photo.renren.com",
"User-Agent": (
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10.10; rv:35.0) Gecko/20100101 Firefox/35.0"
),
"Accept": ("application/json, text/javascript, */*; q=0.01"),
"Accept-Language": "en-US,en;q=0.5",
"Accept-Encoding": "gzip, deflate",
"Cookie": self.cookie
}
return headers
def _get_response(self):
response = requests.get(
"http://photo.renren.com/photo/{}/albumlist/v7".format(
self.user_id),
headers=self.headers)
assert response.status_code == 200, "status_code is {}!".format(
response.status_code)
return response.text
def iterate_album_info(self):
raw_content = self._get_response()
for album_info in json.loads(
re.search(self.re_pattern, raw_content).groups()[0]):
album_name = album_info["albumName"]
album_URL = "http://photo.renren.com/photo/{}/album-{}/list".format(
self.user_id, album_info["albumId"])
yield album_name, album_URL
class Image(Album):
def __init__(self, cookie_object, album_URL):
super(Image, self).__init__(cookie_object=cookie_object)
self.album_URL = album_URL
def _get_response(self):
response = requests.get(self.album_URL, headers=self.headers)
assert response.status_code == 200, "status_code is {}!".format(
response.status_code)
return response.text
def iterate_image_info(self):
raw_content = self._get_response()
for image_info in json.loads(raw_content)["list"]:
large_image_URL = image_info["largeUrl"]
original_image_URL = large_image_URL.replace("large", "original")
image_name = image_info["title"] if image_info["title"] else int(
image_info["id"])
image_name = str(image_name).replace(
os.sep,
" ") + "." + large_image_URL.split("/")[-1].split(".")[-1]
image_time = image_info["time"]
yield image_name, (original_image_URL, large_image_URL), image_time
def download_image(image_URL,
image_file_path,
timeout=3,
sleep_duration=0.3,
retry_num=3):
assert not os.path.isfile(
image_file_path), "File {} already exists!".format(image_file_path)
while retry_num:
try:
response = requests.get(image_URL, stream=True, timeout=timeout)
assert response.status_code == 200, "status_code is {}!".format(
response.status_code)
if os.path.isfile(image_file_path):
os.remove(image_file_path)
with open(image_file_path, "wb") as file_object:
for chunk in response.iter_content():
file_object.write(chunk)
break
except Exception as exception:
print("Exception for {}: {}".format(image_URL, exception))
retry_num -= 1
continue
finally:
time.sleep(sleep_duration)
def download_album(account, password, main_folder_path):
cookie_object = Cookie(account=account, password=password)
for album_name, album_URL in Album(
cookie_object=cookie_object).iterate_album_info():
print("Working on album {} ...".format(album_name))
album_folder_path = os.path.join(main_folder_path, album_name)
os.makedirs(album_folder_path, exist_ok=True)
for image_name, image_URL_tuple, image_time in Image(
cookie_object=cookie_object,
album_URL=album_URL).iterate_image_info():
image_file_path = os.path.join(album_folder_path, image_name)
if os.path.isfile(image_file_path):
continue
print("Working on image {} ...".format(image_name))
for image_URL in image_URL_tuple:
download_image(image_URL, image_file_path)
if os.path.isfile(image_file_path):
break
if not os.path.isfile(image_file_path):
print("Failed to download image {}!".format(image_name))
continue
# Insert EXIF information
datetime_object = datetime.strptime(image_time,
"%a %b %d %H:%M:%S %Z %Y")
exif_ifd = {
piexif.ExifIFD.DateTimeOriginal: datetime_object.isoformat(" ")
}
exif_dict = {"Exif": exif_ifd}
exif_bytes = piexif.dump(exif_dict)
piexif.insert(exif_bytes, image_file_path)
def run():
print("Parsing command-line arguments ...")
parser = argparse.ArgumentParser(
description="A toolkit for downloading renren albums")
parser.add_argument("--account", required=True, help="Account")
parser.add_argument("--password", required=True, help="Password")
parser.add_argument("--main_folder_path",
default="/tmp/renren_albums",
help="Folder which stores the downloaded albums")
args = parser.parse_args()
print("Downloading renren albums ...")
download_album(args.account, args.password, args.main_folder_path)
print("All done!")
if __name__ == "__main__":
run()
| {
"repo_name": "nixingyang/Miscellaneous-Projects",
"path": "Renren Albums/solution.py",
"copies": "1",
"size": "8152",
"license": "mit",
"hash": 7043418634954964000,
"line_mean": 34.4434782609,
"line_max": 160,
"alpha_frac": 0.5588812561,
"autogenerated": false,
"ratio": 3.883754168651739,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9942016775855314,
"avg_score": 0.00012372977928512687,
"num_lines": 230
} |
from __future__ import absolute_import, division, print_function
import os
import re
import requests
import logging
from subprocess import Popen, PIPE
from .utils import parse_xml
from .env import CondaCreator
from .compatibility import FileNotFoundError, urlparse
from .exceptions import HDFSConfigException, KnitException
from .yarn_api import YARNAPI
logger = logging.getLogger(__name__)
JAR_FILE = "knit-1.0-SNAPSHOT.jar"
JAVA_APP = "io.continuum.knit.Client"
class Knit(object):
"""
Connection to HDFS/YARN
Parameters
----------
nn: str
Namenode hostname/ip
nn_port: str
Namenode Port (default: 9000)
rm: str
Resource Manager hostname
rm_port: str
Resource Manager port (default: 8088)
autodetect: bool
Autodetect NN/RM IP/Ports
Examples
--------
>>> k = Knit()
>>> app_id = k.start('sleep 100', num_containers=5, memory=1024)
"""
def __init__(self, nn="localhost", nn_port=9000,
rm="localhost", rm_port=8088, autodetect=False):
self.nn = nn
self.nn_port = str(nn_port)
self.rm = rm
self.rm_port = str(rm_port)
if autodetect:
self.nn, self.nn_port = self._hdfs_conf(autodetect)
self.rm, self.rm_port = self._yarn_conf(autodetect)
else:
# validates IP/Port is correct
self._hdfs_conf()
self._yarn_conf()
self.yarn_api = YARNAPI(self.rm, self.rm_port)
self.java_lib_dir = os.path.join(os.path.dirname(__file__), "java_libs")
self.KNIT_HOME = os.environ.get('KNIT_HOME') or self.java_lib_dir
# must set KNIT_HOME ENV for YARN App
os.environ['KNIT_HOME'] = self.KNIT_HOME
def __str__(self):
return "Knit<NN={0}:{1};RM={2}:{3}>".format(self.nn, self.nn_port,
self.rm, self.rm_port)
__repr__ = __str__
@property
def JAR_FILE_PATH(self):
return os.path.join(self.KNIT_HOME, JAR_FILE)
def _yarn_conf(self, autodetect=False):
"""
Load YARN config from default locations.
Parameters
----------
autodetect: bool
Returns
-------
tuple (ip, port)
"""
confd = os.environ.get('HADOOP_CONF_DIR', os.environ.get('HADOOP_INSTALL',
'') + '/hadoop/conf')
conf = {}
yarn_site = os.path.join(confd, 'yarn-site.xml')
try:
with open(yarn_site, 'r') as f:
conf = parse_xml(f, 'yarn.resourcemanager.webapp.address')
except FileNotFoundError:
pass
finally:
if not conf:
conf['host'] = "localhost"
conf['port'] = "8088"
if autodetect:
return conf['host'], conf['port']
if self.rm != conf['host']:
msg = "Possible Resource Manager hostname mismatch. Detected {0}".format(conf['host'])
raise HDFSConfigException(msg)
if str(self.rm_port) != str(conf['port']):
msg = "Possible Resource Manager port mismatch. Detected {0}".format(conf['port'])
raise HDFSConfigException(msg)
return conf
def _hdfs_conf(self, autodetect=False):
""""
Parameters
----------
autodetect: bool
Returns
-------
tuple (ip, port)
"""
confd = os.environ.get('HADOOP_CONF_DIR', os.environ.get('HADOOP_INSTALL',
'') + '/hadoop/conf')
conf = {}
core_site = os.path.join(confd, 'core-site.xml')
try:
with open(core_site, 'r') as f:
                conf = parse_xml(f, 'fs.defaultFS')
except FileNotFoundError:
pass
finally:
if not conf:
conf['host'] = "localhost"
conf['port'] = "9000"
if autodetect:
return conf['host'], conf['port']
if self.nn != conf['host']:
msg = "Possible Namenode hostname mismatch. Detected {0}".format(conf['host'])
raise HDFSConfigException(msg)
if str(self.nn_port) != str(conf['port']):
msg = "Possible Namenode port mismatch. Detected {0}".format(conf['port'])
raise HDFSConfigException(msg)
return conf
def start(self, cmd, num_containers=1, virtual_cores=1, memory=128, env="", files=None):
"""
Method to start a yarn app with a distributed shell
Parameters
----------
cmd: str
command to run in each yarn container
num_containers: int
Number of containers YARN should request (default: 1)
* A container should be requested with the number of cores it can
saturate, i.e.
* the average number of threads it expects to have runnable at a
time.
virtual_cores: int
Number of virtual cores per container (default: 1)
* A node's capacity should be configured with virtual cores equal to
* its number of physical cores.
memory: int
Memory per container (default: 128)
* The unit for memory is megabytes.
env: string
Full Path to zipped Python environment
files: list
            list of files to be included in each container
Returns
-------
applicationId: str
A yarn application ID string
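        Examples
        --------
        A minimal sketch; the command and resource numbers are illustrative
        only:
        >>> k = Knit(autodetect=True)
        >>> app_id = k.start('date', num_containers=2, memory=256)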
"""
args = ["hadoop", "jar", self.JAR_FILE_PATH, JAVA_APP, "--numContainers", str(num_containers),
"--command", cmd, "--virtualCores", str(virtual_cores), "--memory", str(memory)]
if env:
args = args + ["--pythonEnv", str(env)]
if files:
if not isinstance(files, list):
raise KnitException("File argument must be a list of strings")
f = ','.join(files)
args = args + ["--files", str(f)]
logger.debug("Running Command: {0}".format(' '.join(args)))
proc = Popen(args, stdout=PIPE, stderr=PIPE)
out, err = proc.communicate()
logger.debug(out)
logger.debug(err)
# last string in out is applicationId
        # TODO Better JAVA Python communication: appId, Resources, Yarn, etc.
appId = out.split()[-1].decode("utf-8")
appId = re.sub('id', '', appId)
return appId
@staticmethod
def create_env(env_name, packages=None, conda_root=None, remove=False):
"""
Create zipped directory of a conda environment
Parameters
----------
env_name : str
packages : list
conda_root : str, optional
remove : bool
remove possible conda environment before creating
Returns
-------
path: str
path to zipped conda environment
Examples
--------
>>> k = Knit()
>>> pkg_path = k.create_env(env_name='dev',
... packages=['distributed', 'dask', 'pandas'])
"""
c = CondaCreator(conda_root=conda_root)
path = c.create_env(env_name, packages=packages, remove=remove)
return path
def logs(self, app_id, shell=False):
"""
        Collect logs from RM (if running).
With shell=True, collect logs from HDFS after job completion
Parameters
----------
app_id: str
A yarn application ID string
shell: bool
Shell out to yarn CLI (default False)
Returns
-------
log: dictionary
logs from each container (when possible)
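        Examples
        --------
        A minimal sketch; `k` is a Knit instance as above and the application
        id below is illustrative only:
        >>> app_logs = k.logs('application_1450011111111_0001')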
"""
return self.yarn_api.logs(app_id, shell=shell)
def kill(self, app_id):
"""
Method to kill a yarn application
Parameters
----------
app_id: str
YARN application id
Returns
-------
bool:
True if successful, False otherwise.
"""
return self.yarn_api.kill(app_id)
def status(self, app_id):
""" Get status of an application
Parameters
----------
app_id: str
A yarn application ID string
Returns
-------
log: dictionary
status of application
"""
return self.yarn_api.status(app_id)
@property
def apps(self):
return self.yarn_api.apps
| {
"repo_name": "NielsZeilemaker/knit",
"path": "knit/core.py",
"copies": "1",
"size": "8596",
"license": "bsd-3-clause",
"hash": -5972192433371652000,
"line_mean": 27.6533333333,
"line_max": 102,
"alpha_frac": 0.5348999535,
"autogenerated": false,
"ratio": 4.097235462345091,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00045122070312677545,
"num_lines": 300
} |
from __future__ import absolute_import, division, print_function
import os
import re
import shutil
import stat
def ensure_directory(directory, *args, **kwargs):
# Fail if the destination exists and it's not a directory
if not os.path.isdir(directory):
os.makedirs(directory, *args, **kwargs)
def copyfile(srcfile, destfile, skip=re.compile(r".*\.pyc\Z|__pycache__\Z", re.IGNORECASE)):
ensure_directory(os.path.dirname(destfile))
if os.path.isdir(srcfile):
# TODO: just use shutil.copytree to avoid bikeshedding
for name in os.listdir(srcfile):
if not skip.match(name):
copyfile(
os.path.join(srcfile, name),
os.path.join(destfile, name)
)
else:
# We use copyfile (not move, copy, or copy2) to be extra sure that we are
# not moving directories over (copyfile fails for directories) as well as
# to ensure that we are not copying over any metadata because we want more
# control over what metadata we actually copy over.
shutil.copyfile(srcfile, destfile)
# Grab the stat data for the source file so we can use it to copy over
# certain metadata to the destination file.
st = os.stat(srcfile)
# If our file is executable, then make our destination file
# executable.
if os.access(srcfile, os.X_OK):
permissions = st.st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH
os.chmod(destfile, permissions)
class cached_property(object): # flake8: noqa
def __init__(self, func):
self.__doc__ = getattr(func, '__doc__')
self.func = func
def __get__(self, obj, cls):
if obj is None:
return self
value = obj.__dict__[self.func.__name__] = self.func(obj)
return value
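# Usage sketch for the descriptor above (class and method names are
# illustrative only):
#   class Report(object):
#       @cached_property
#       def data(self):
#           return compute_expensive_value()
#   report = Report()
#   report.data   # computed on first access, then stored in report.__dict__
#   report.data   # subsequent accesses hit the cached instance attribute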
| {
"repo_name": "ionelmc/virtualenv",
"path": "virtualenv/_utils.py",
"copies": "1",
"size": "1837",
"license": "mit",
"hash": -1442255240079679200,
"line_mean": 33.6603773585,
"line_max": 92,
"alpha_frac": 0.6265650517,
"autogenerated": false,
"ratio": 3.867368421052632,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49939334727526313,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import os
import re
import shutil
import string
import sys
import time
import unittest
from collections import defaultdict, namedtuple, OrderedDict
import numpy as np
import ray
import ray.test.test_functions as test_functions
import ray.test.test_utils
if sys.version_info >= (3, 0):
from importlib import reload
def assert_equal(obj1, obj2):
module_numpy = (type(obj1).__module__ == np.__name__
or type(obj2).__module__ == np.__name__)
if module_numpy:
empty_shape = ((hasattr(obj1, "shape") and obj1.shape == ())
or (hasattr(obj2, "shape") and obj2.shape == ()))
if empty_shape:
# This is a special case because currently np.testing.assert_equal
# fails because we do not properly handle different numerical
# types.
assert obj1 == obj2, ("Objects {} and {} are "
"different.".format(obj1, obj2))
else:
np.testing.assert_equal(obj1, obj2)
elif hasattr(obj1, "__dict__") and hasattr(obj2, "__dict__"):
special_keys = ["_pytype_"]
assert (set(list(obj1.__dict__.keys()) + special_keys) == set(
list(obj2.__dict__.keys()) + special_keys)), ("Objects {} "
"and {} are "
"different.".format(
obj1, obj2))
for key in obj1.__dict__.keys():
if key not in special_keys:
assert_equal(obj1.__dict__[key], obj2.__dict__[key])
elif type(obj1) is dict or type(obj2) is dict:
assert_equal(obj1.keys(), obj2.keys())
for key in obj1.keys():
assert_equal(obj1[key], obj2[key])
elif type(obj1) is list or type(obj2) is list:
assert len(obj1) == len(obj2), ("Objects {} and {} are lists with "
"different lengths.".format(
obj1, obj2))
for i in range(len(obj1)):
assert_equal(obj1[i], obj2[i])
elif type(obj1) is tuple or type(obj2) is tuple:
assert len(obj1) == len(obj2), ("Objects {} and {} are tuples with "
"different lengths.".format(
obj1, obj2))
for i in range(len(obj1)):
assert_equal(obj1[i], obj2[i])
elif (ray.serialization.is_named_tuple(type(obj1))
or ray.serialization.is_named_tuple(type(obj2))):
assert len(obj1) == len(obj2), ("Objects {} and {} are named tuples "
"with different lengths.".format(
obj1, obj2))
for i in range(len(obj1)):
assert_equal(obj1[i], obj2[i])
else:
assert obj1 == obj2, "Objects {} and {} are different.".format(
obj1, obj2)
if sys.version_info >= (3, 0):
long_extras = [0, np.array([["hi", u"hi"], [1.3, 1]])]
else:
long_extras = [
long(0), # noqa: E501,F821
np.array([
["hi", u"hi"],
[1.3, long(1)] # noqa: E501,F821
])
]
PRIMITIVE_OBJECTS = [
0, 0.0, 0.9, 1 << 62, 1 << 100, 1 << 999, [1 << 100, [1 << 100]], "a",
string.printable, "\u262F", u"hello world", u"\xff\xfe\x9c\x001\x000\x00",
None, True, False, [], (), {},
np.int8(3),
np.int32(4),
np.int64(5),
np.uint8(3),
np.uint32(4),
np.uint64(5),
np.float32(1.9),
np.float64(1.9),
np.zeros([100, 100]),
np.random.normal(size=[100, 100]),
np.array(["hi", 3]),
np.array(["hi", 3], dtype=object)
] + long_extras
COMPLEX_OBJECTS = [
[[[[[[[[[[[[]]]]]]]]]]]],
{"obj{}".format(i): np.random.normal(size=[100, 100])
for i in range(10)},
# {(): {(): {(): {(): {(): {(): {(): {(): {(): {(): {
# (): {(): {}}}}}}}}}}}}},
(
(((((((((), ), ), ), ), ), ), ), ), ),
{
"a": {
"b": {
"c": {
"d": {}
}
}
}
}
]
class Foo(object):
def __init__(self, value=0):
self.value = value
def __hash__(self):
return hash(self.value)
def __eq__(self, other):
return other.value == self.value
class Bar(object):
def __init__(self):
for i, val in enumerate(PRIMITIVE_OBJECTS + COMPLEX_OBJECTS):
setattr(self, "field{}".format(i), val)
class Baz(object):
def __init__(self):
self.foo = Foo()
self.bar = Bar()
def method(self, arg):
pass
class Qux(object):
def __init__(self):
self.objs = [Foo(), Bar(), Baz()]
class SubQux(Qux):
def __init__(self):
Qux.__init__(self)
class CustomError(Exception):
pass
Point = namedtuple("Point", ["x", "y"])
NamedTupleExample = namedtuple("Example",
"field1, field2, field3, field4, field5")
CUSTOM_OBJECTS = [
Exception("Test object."),
CustomError(),
Point(11, y=22),
Foo(),
Bar(),
Baz(), # Qux(), SubQux(),
NamedTupleExample(1, 1.0, "hi", np.zeros([3, 5]), [1, 2, 3])
]
BASE_OBJECTS = PRIMITIVE_OBJECTS + COMPLEX_OBJECTS + CUSTOM_OBJECTS
LIST_OBJECTS = [[obj] for obj in BASE_OBJECTS]
TUPLE_OBJECTS = [(obj, ) for obj in BASE_OBJECTS]
# The check that type(obj).__module__ != "numpy" should be unnecessary, but
# otherwise this seems to fail on Mac OS X on Travis.
DICT_OBJECTS = (
[{
obj: obj
} for obj in PRIMITIVE_OBJECTS
if (obj.__hash__ is not None and type(obj).__module__ != "numpy")] + [{
0:
obj
} for obj in BASE_OBJECTS] + [{
Foo(123): Foo(456)
}])
RAY_TEST_OBJECTS = BASE_OBJECTS + LIST_OBJECTS + TUPLE_OBJECTS + DICT_OBJECTS
# Check that the correct version of cloudpickle is installed.
try:
import cloudpickle
cloudpickle.dumps(Point)
except AttributeError:
cloudpickle_command = "pip install --upgrade cloudpickle"
raise Exception("You have an older version of cloudpickle that is not "
"able to serialize namedtuples. Try running "
"\n\n{}\n\n".format(cloudpickle_command))
class SerializationTest(unittest.TestCase):
def testRecursiveObjects(self):
ray.init(num_workers=0)
class ClassA(object):
pass
# Make a list that contains itself.
l = []
l.append(l)
# Make an object that contains itself as a field.
a1 = ClassA()
a1.field = a1
# Make two objects that contain each other as fields.
a2 = ClassA()
a3 = ClassA()
a2.field = a3
a3.field = a2
# Make a dictionary that contains itself.
d1 = {}
d1["key"] = d1
# Create a list of recursive objects.
recursive_objects = [l, a1, a2, a3, d1]
# Check that exceptions are thrown when we serialize the recursive
# objects.
for obj in recursive_objects:
self.assertRaises(Exception, lambda: ray.put(obj))
ray.worker.cleanup()
def testPassingArgumentsByValue(self):
ray.init(num_workers=1)
@ray.remote
def f(x):
return x
# Check that we can pass arguments by value to remote functions and
# that they are uncorrupted.
for obj in RAY_TEST_OBJECTS:
assert_equal(obj, ray.get(f.remote(obj)))
ray.worker.cleanup()
def testPassingArgumentsByValueOutOfTheBox(self):
ray.init(num_workers=1)
@ray.remote
def f(x):
return x
# Test passing lambdas.
def temp():
return 1
self.assertEqual(ray.get(f.remote(temp))(), 1)
self.assertEqual(ray.get(f.remote(lambda x: x + 1))(3), 4)
# Test sets.
self.assertEqual(ray.get(f.remote(set())), set())
s = set([1, (1, 2, "hi")])
self.assertEqual(ray.get(f.remote(s)), s)
# Test types.
self.assertEqual(ray.get(f.remote(int)), int)
self.assertEqual(ray.get(f.remote(float)), float)
self.assertEqual(ray.get(f.remote(str)), str)
class Foo(object):
def __init__(self):
pass
# Make sure that we can put and get a custom type. Note that the result
# won't be "equal" to Foo.
ray.get(ray.put(Foo))
ray.worker.cleanup()
class WorkerTest(unittest.TestCase):
def testPythonWorkers(self):
# Test the codepath for starting workers from the Python script,
# instead of the local scheduler. This codepath is for debugging
# purposes only.
num_workers = 4
ray.worker._init(
num_workers=num_workers,
start_workers_from_local_scheduler=False,
start_ray_local=True)
@ray.remote
def f(x):
return x
values = ray.get([f.remote(1) for i in range(num_workers * 2)])
self.assertEqual(values, [1] * (num_workers * 2))
ray.worker.cleanup()
def testPutGet(self):
ray.init(num_workers=0)
for i in range(100):
value_before = i * 10**6
objectid = ray.put(value_before)
value_after = ray.get(objectid)
self.assertEqual(value_before, value_after)
for i in range(100):
value_before = i * 10**6 * 1.0
objectid = ray.put(value_before)
value_after = ray.get(objectid)
self.assertEqual(value_before, value_after)
for i in range(100):
value_before = "h" * i
objectid = ray.put(value_before)
value_after = ray.get(objectid)
self.assertEqual(value_before, value_after)
for i in range(100):
value_before = [1] * i
objectid = ray.put(value_before)
value_after = ray.get(objectid)
self.assertEqual(value_before, value_after)
ray.worker.cleanup()
class APITest(unittest.TestCase):
def init_ray(self, kwargs=None):
if kwargs is None:
kwargs = {}
ray.init(**kwargs)
def tearDown(self):
ray.worker.cleanup()
def testRegisterClass(self):
self.init_ray({"num_workers": 2})
# Check that putting an object of a class that has not been registered
# throws an exception.
class TempClass(object):
pass
ray.get(ray.put(TempClass()))
# Test subtypes of dictionaries.
value_before = OrderedDict([("hello", 1), ("world", 2)])
object_id = ray.put(value_before)
self.assertEqual(value_before, ray.get(object_id))
value_before = defaultdict(lambda: 0, [("hello", 1), ("world", 2)])
object_id = ray.put(value_before)
self.assertEqual(value_before, ray.get(object_id))
value_before = defaultdict(lambda: [], [("hello", 1), ("world", 2)])
object_id = ray.put(value_before)
self.assertEqual(value_before, ray.get(object_id))
# Test passing custom classes into remote functions from the driver.
@ray.remote
def f(x):
return x
foo = ray.get(f.remote(Foo(7)))
self.assertEqual(foo, Foo(7))
regex = re.compile(r"\d+\.\d*")
new_regex = ray.get(f.remote(regex))
# This seems to fail on the system Python 3 that comes with
# Ubuntu, so it is commented out for now:
# self.assertEqual(regex, new_regex)
# Instead, we do this:
self.assertEqual(regex.pattern, new_regex.pattern)
# Test returning custom classes created on workers.
@ray.remote
def g():
return SubQux(), Qux()
subqux, qux = ray.get(g.remote())
self.assertEqual(subqux.objs[2].foo.value, 0)
# Test exporting custom class definitions from one worker to another
# when the worker is blocked in a get.
class NewTempClass(object):
def __init__(self, value):
self.value = value
@ray.remote
def h1(x):
return NewTempClass(x)
@ray.remote
def h2(x):
return ray.get(h1.remote(x))
self.assertEqual(ray.get(h2.remote(10)).value, 10)
# Test registering multiple classes with the same name.
@ray.remote(num_return_vals=3)
def j():
class Class0(object):
def method0(self):
pass
c0 = Class0()
class Class0(object):
def method1(self):
pass
c1 = Class0()
class Class0(object):
def method2(self):
pass
c2 = Class0()
return c0, c1, c2
results = []
for _ in range(5):
results += j.remote()
for i in range(len(results) // 3):
c0, c1, c2 = ray.get(results[(3 * i):(3 * (i + 1))])
c0.method0()
c1.method1()
c2.method2()
self.assertFalse(hasattr(c0, "method1"))
self.assertFalse(hasattr(c0, "method2"))
self.assertFalse(hasattr(c1, "method0"))
self.assertFalse(hasattr(c1, "method2"))
self.assertFalse(hasattr(c2, "method0"))
self.assertFalse(hasattr(c2, "method1"))
@ray.remote
def k():
class Class0(object):
def method0(self):
pass
c0 = Class0()
class Class0(object):
def method1(self):
pass
c1 = Class0()
class Class0(object):
def method2(self):
pass
c2 = Class0()
return c0, c1, c2
results = ray.get([k.remote() for _ in range(5)])
for c0, c1, c2 in results:
c0.method0()
c1.method1()
c2.method2()
self.assertFalse(hasattr(c0, "method1"))
self.assertFalse(hasattr(c0, "method2"))
self.assertFalse(hasattr(c1, "method0"))
self.assertFalse(hasattr(c1, "method2"))
self.assertFalse(hasattr(c2, "method0"))
self.assertFalse(hasattr(c2, "method1"))
def testKeywordArgs(self):
reload(test_functions)
self.init_ray()
x = test_functions.keyword_fct1.remote(1)
self.assertEqual(ray.get(x), "1 hello")
x = test_functions.keyword_fct1.remote(1, "hi")
self.assertEqual(ray.get(x), "1 hi")
x = test_functions.keyword_fct1.remote(1, b="world")
self.assertEqual(ray.get(x), "1 world")
x = test_functions.keyword_fct2.remote(a="w", b="hi")
self.assertEqual(ray.get(x), "w hi")
x = test_functions.keyword_fct2.remote(b="hi", a="w")
self.assertEqual(ray.get(x), "w hi")
x = test_functions.keyword_fct2.remote(a="w")
self.assertEqual(ray.get(x), "w world")
x = test_functions.keyword_fct2.remote(b="hi")
self.assertEqual(ray.get(x), "hello hi")
x = test_functions.keyword_fct2.remote("w")
self.assertEqual(ray.get(x), "w world")
x = test_functions.keyword_fct2.remote("w", "hi")
self.assertEqual(ray.get(x), "w hi")
x = test_functions.keyword_fct3.remote(0, 1, c="w", d="hi")
self.assertEqual(ray.get(x), "0 1 w hi")
x = test_functions.keyword_fct3.remote(0, 1, d="hi", c="w")
self.assertEqual(ray.get(x), "0 1 w hi")
x = test_functions.keyword_fct3.remote(0, 1, c="w")
self.assertEqual(ray.get(x), "0 1 w world")
x = test_functions.keyword_fct3.remote(0, 1, d="hi")
self.assertEqual(ray.get(x), "0 1 hello hi")
x = test_functions.keyword_fct3.remote(0, 1)
self.assertEqual(ray.get(x), "0 1 hello world")
# Check that we cannot pass invalid keyword arguments to functions.
@ray.remote
def f1():
return
@ray.remote
def f2(x, y=0, z=0):
return
        # Make sure we get an exception if too many arguments or invalid
        # keyword arguments are passed in.
with self.assertRaises(Exception):
f1.remote(3)
with self.assertRaises(Exception):
f1.remote(x=3)
with self.assertRaises(Exception):
f2.remote(0, w=0)
# Make sure we get an exception if too many arguments are passed in.
with self.assertRaises(Exception):
f2.remote(1, 2, 3, 4)
@ray.remote
def f3(x):
return x
self.assertEqual(ray.get(f3.remote(4)), 4)
def testVariableNumberOfArgs(self):
reload(test_functions)
self.init_ray()
x = test_functions.varargs_fct1.remote(0, 1, 2)
self.assertEqual(ray.get(x), "0 1 2")
x = test_functions.varargs_fct2.remote(0, 1, 2)
self.assertEqual(ray.get(x), "1 2")
self.assertTrue(test_functions.kwargs_exception_thrown)
self.assertTrue(test_functions.varargs_and_kwargs_exception_thrown)
@ray.remote
def f1(*args):
return args
@ray.remote
def f2(x, y, *args):
return x, y, args
self.assertEqual(ray.get(f1.remote()), ())
self.assertEqual(ray.get(f1.remote(1)), (1, ))
self.assertEqual(ray.get(f1.remote(1, 2, 3)), (1, 2, 3))
with self.assertRaises(Exception):
f2.remote()
with self.assertRaises(Exception):
f2.remote(1)
self.assertEqual(ray.get(f2.remote(1, 2)), (1, 2, ()))
self.assertEqual(ray.get(f2.remote(1, 2, 3)), (1, 2, (3, )))
self.assertEqual(ray.get(f2.remote(1, 2, 3, 4)), (1, 2, (3, 4)))
def testNoArgs(self):
reload(test_functions)
self.init_ray()
ray.get(test_functions.no_op.remote())
def testDefiningRemoteFunctions(self):
self.init_ray({"num_cpus": 3})
# Test that we can define a remote function in the shell.
@ray.remote
def f(x):
return x + 1
self.assertEqual(ray.get(f.remote(0)), 1)
# Test that we can redefine the remote function.
@ray.remote
def f(x):
return x + 10
while True:
val = ray.get(f.remote(0))
self.assertTrue(val in [1, 10])
if val == 10:
break
else:
print("Still using old definition of f, trying again.")
# Test that we can close over plain old data.
data = [
np.zeros([3, 5]), (1, 2, "a"), [0.0, 1.0, 1 << 62], 1 << 60, {
"a": np.zeros(3)
}
]
@ray.remote
def g():
return data
ray.get(g.remote())
# Test that we can close over modules.
@ray.remote
def h():
return np.zeros([3, 5])
assert_equal(ray.get(h.remote()), np.zeros([3, 5]))
@ray.remote
def j():
return time.time()
ray.get(j.remote())
# Test that we can define remote functions that call other remote
# functions.
@ray.remote
def k(x):
return x + 1
@ray.remote
def l(x):
return ray.get(k.remote(x))
@ray.remote
def m(x):
return ray.get(l.remote(x))
self.assertEqual(ray.get(k.remote(1)), 2)
self.assertEqual(ray.get(l.remote(1)), 2)
self.assertEqual(ray.get(m.remote(1)), 2)
def testGetMultiple(self):
self.init_ray()
object_ids = [ray.put(i) for i in range(10)]
self.assertEqual(ray.get(object_ids), list(range(10)))
# Get a random choice of object IDs with duplicates.
indices = list(np.random.choice(range(10), 5))
indices += indices
results = ray.get([object_ids[i] for i in indices])
self.assertEqual(results, indices)
def testWait(self):
self.init_ray({"num_cpus": 1})
@ray.remote
def f(delay):
time.sleep(delay)
return 1
objectids = [
f.remote(1.0),
f.remote(0.5),
f.remote(0.5),
f.remote(0.5)
]
ready_ids, remaining_ids = ray.wait(objectids)
self.assertEqual(len(ready_ids), 1)
self.assertEqual(len(remaining_ids), 3)
ready_ids, remaining_ids = ray.wait(objectids, num_returns=4)
self.assertEqual(set(ready_ids), set(objectids))
self.assertEqual(remaining_ids, [])
objectids = [
f.remote(0.5),
f.remote(0.5),
f.remote(0.5),
f.remote(0.5)
]
start_time = time.time()
ready_ids, remaining_ids = ray.wait(
objectids, timeout=1750, num_returns=4)
self.assertLess(time.time() - start_time, 2)
self.assertEqual(len(ready_ids), 3)
self.assertEqual(len(remaining_ids), 1)
ray.wait(objectids)
objectids = [
f.remote(1.0),
f.remote(0.5),
f.remote(0.5),
f.remote(0.5)
]
start_time = time.time()
ready_ids, remaining_ids = ray.wait(objectids, timeout=5000)
self.assertTrue(time.time() - start_time < 5)
self.assertEqual(len(ready_ids), 1)
self.assertEqual(len(remaining_ids), 3)
# Verify that calling wait with duplicate object IDs throws an
# exception.
x = ray.put(1)
self.assertRaises(Exception, lambda: ray.wait([x, x]))
# Make sure it is possible to call wait with an empty list.
ready_ids, remaining_ids = ray.wait([])
self.assertEqual(ready_ids, [])
self.assertEqual(remaining_ids, [])
def testMultipleWaitsAndGets(self):
        # It is important to have at least three workers here, so that the
        # three tasks launched in this experiment can run at the same time.
self.init_ray()
@ray.remote
def f(delay):
time.sleep(delay)
return 1
@ray.remote
def g(l):
# The argument l should be a list containing one object ID.
ray.wait([l[0]])
@ray.remote
def h(l):
# The argument l should be a list containing one object ID.
ray.get(l[0])
# Make sure that multiple wait requests involving the same object ID
# all return.
x = f.remote(1)
ray.get([g.remote([x]), g.remote([x])])
# Make sure that multiple get requests involving the same object ID all
# return.
x = f.remote(1)
ray.get([h.remote([x]), h.remote([x])])
def testCachingFunctionsToRun(self):
        # Test that functions registered with run_function_on_all_workers
        # before the driver is connected are still exported to every worker.
def f(worker_info):
sys.path.append(1)
ray.worker.global_worker.run_function_on_all_workers(f)
def f(worker_info):
sys.path.append(2)
ray.worker.global_worker.run_function_on_all_workers(f)
def g(worker_info):
sys.path.append(3)
ray.worker.global_worker.run_function_on_all_workers(g)
def f(worker_info):
sys.path.append(4)
ray.worker.global_worker.run_function_on_all_workers(f)
self.init_ray()
@ray.remote
def get_state():
time.sleep(1)
return sys.path[-4], sys.path[-3], sys.path[-2], sys.path[-1]
res1 = get_state.remote()
res2 = get_state.remote()
self.assertEqual(ray.get(res1), (1, 2, 3, 4))
self.assertEqual(ray.get(res2), (1, 2, 3, 4))
# Clean up the path on the workers.
def f(worker_info):
sys.path.pop()
sys.path.pop()
sys.path.pop()
sys.path.pop()
ray.worker.global_worker.run_function_on_all_workers(f)
def testRunningFunctionOnAllWorkers(self):
self.init_ray()
def f(worker_info):
sys.path.append("fake_directory")
ray.worker.global_worker.run_function_on_all_workers(f)
@ray.remote
def get_path1():
return sys.path
self.assertEqual("fake_directory", ray.get(get_path1.remote())[-1])
def f(worker_info):
sys.path.pop(-1)
ray.worker.global_worker.run_function_on_all_workers(f)
# Create a second remote function to guarantee that when we call
# get_path2.remote(), the second function to run will have been run on
# the worker.
@ray.remote
def get_path2():
return sys.path
self.assertTrue("fake_directory" not in ray.get(get_path2.remote()))
def testLoggingAPI(self):
self.init_ray({"driver_mode": ray.SILENT_MODE})
def events():
# This is a hack for getting the event log. It is not part of the
# API.
keys = ray.worker.global_worker.redis_client.keys("event_log:*")
res = []
for key in keys:
res.extend(
ray.worker.global_worker.redis_client.zrange(key, 0, -1))
return res
def wait_for_num_events(num_events, timeout=10):
start_time = time.time()
while time.time() - start_time < timeout:
if len(events()) >= num_events:
return
time.sleep(0.1)
print("Timing out of wait.")
@ray.remote
def test_log_event():
ray.log_event("event_type1", contents={"key": "val"})
@ray.remote
def test_log_span():
with ray.log_span("event_type2", contents={"key": "val"}):
pass
# Make sure that we can call ray.log_event in a remote function.
ray.get(test_log_event.remote())
# Wait for the event to appear in the event log.
wait_for_num_events(1)
self.assertEqual(len(events()), 1)
# Make sure that we can call ray.log_span in a remote function.
ray.get(test_log_span.remote())
# Wait for the events to appear in the event log.
wait_for_num_events(2)
self.assertEqual(len(events()), 2)
@ray.remote
def test_log_span_exception():
with ray.log_span("event_type2", contents={"key": "val"}):
raise Exception("This failed.")
# Make sure that logging a span works if an exception is thrown.
test_log_span_exception.remote()
# Wait for the events to appear in the event log.
wait_for_num_events(3)
self.assertEqual(len(events()), 3)
def testIdenticalFunctionNames(self):
# Define a bunch of remote functions and make sure that we don't
# accidentally call an older version.
self.init_ray()
num_calls = 200
@ray.remote
def f():
return 1
results1 = [f.remote() for _ in range(num_calls)]
@ray.remote
def f():
return 2
results2 = [f.remote() for _ in range(num_calls)]
@ray.remote
def f():
return 3
results3 = [f.remote() for _ in range(num_calls)]
@ray.remote
def f():
return 4
results4 = [f.remote() for _ in range(num_calls)]
@ray.remote
def f():
return 5
results5 = [f.remote() for _ in range(num_calls)]
self.assertEqual(ray.get(results1), num_calls * [1])
self.assertEqual(ray.get(results2), num_calls * [2])
self.assertEqual(ray.get(results3), num_calls * [3])
self.assertEqual(ray.get(results4), num_calls * [4])
self.assertEqual(ray.get(results5), num_calls * [5])
@ray.remote
def g():
return 1
@ray.remote # noqa: F811
def g():
return 2
@ray.remote # noqa: F811
def g():
return 3
@ray.remote # noqa: F811
def g():
return 4
@ray.remote # noqa: F811
def g():
return 5
result_values = ray.get([g.remote() for _ in range(num_calls)])
self.assertEqual(result_values, num_calls * [5])
def testIllegalAPICalls(self):
self.init_ray()
# Verify that we cannot call put on an ObjectID.
x = ray.put(1)
with self.assertRaises(Exception):
ray.put(x)
# Verify that we cannot call get on a regular value.
with self.assertRaises(Exception):
ray.get(3)
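# APITestSharded re-runs the whole APITest suite against a Ray instance whose
# global control state is spread across 20 Redis shards.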
class APITestSharded(APITest):
def init_ray(self, kwargs=None):
if kwargs is None:
kwargs = {}
kwargs["start_ray_local"] = True
kwargs["num_redis_shards"] = 20
kwargs["redirect_output"] = True
ray.worker._init(**kwargs)
class PythonModeTest(unittest.TestCase):
def testPythonMode(self):
reload(test_functions)
ray.init(driver_mode=ray.PYTHON_MODE)
@ray.remote
def f():
return np.ones([3, 4, 5])
xref = f.remote()
# Remote functions should return by value.
assert_equal(xref, np.ones([3, 4, 5]))
# Check that ray.get is the identity.
assert_equal(xref, ray.get(xref))
y = np.random.normal(size=[11, 12])
# Check that ray.put is the identity.
assert_equal(y, ray.put(y))
        # Make sure objects are immutable. This example is why we need to copy
        # arguments before passing them into remote functions in PYTHON_MODE.
aref = test_functions.python_mode_f.remote()
assert_equal(aref, np.array([0, 0]))
bref = test_functions.python_mode_g.remote(aref)
# Make sure python_mode_g does not mutate aref.
assert_equal(aref, np.array([0, 0]))
assert_equal(bref, np.array([1, 0]))
# wait should return the first num_returns values passed in as the
# first list and the remaining values as the second list
num_returns = 5
object_ids = [ray.put(i) for i in range(20)]
ready, remaining = ray.wait(
object_ids, num_returns=num_returns, timeout=None)
assert_equal(ready, object_ids[:num_returns])
assert_equal(remaining, object_ids[num_returns:])
# Test actors in PYTHON_MODE.
@ray.remote
class PythonModeTestClass(object):
def __init__(self, array):
self.array = array
def set_array(self, array):
self.array = array
def get_array(self):
return self.array
def modify_and_set_array(self, array):
array[0] = -1
self.array = array
test_actor = PythonModeTestClass.remote(np.arange(10))
# Remote actor functions should return by value
assert_equal(test_actor.get_array.remote(), np.arange(10))
test_array = np.arange(10)
# Remote actor functions should not mutate arguments
test_actor.modify_and_set_array.remote(test_array)
assert_equal(test_array, np.arange(10))
# Remote actor functions should keep state
test_array[0] = -1
assert_equal(test_array, test_actor.get_array.remote())
ray.worker.cleanup()
class UtilsTest(unittest.TestCase):
def testCopyingDirectory(self):
# The functionality being tested here is really multi-node
# functionality, but this test just uses a single node.
ray.init(num_workers=1)
source_text = "hello world"
temp_dir1 = os.path.join(os.path.dirname(__file__), "temp_dir1")
source_dir = os.path.join(temp_dir1, "dir")
source_file = os.path.join(source_dir, "file.txt")
temp_dir2 = os.path.join(os.path.dirname(__file__), "temp_dir2")
target_dir = os.path.join(temp_dir2, "dir")
target_file = os.path.join(target_dir, "file.txt")
def remove_temporary_files():
if os.path.exists(temp_dir1):
shutil.rmtree(temp_dir1)
if os.path.exists(temp_dir2):
shutil.rmtree(temp_dir2)
# Remove the relevant files if they are left over from a previous run
# of this test.
remove_temporary_files()
# Create the source files.
os.mkdir(temp_dir1)
os.mkdir(source_dir)
with open(source_file, "w") as f:
f.write(source_text)
# Copy the source directory to the target directory.
ray.experimental.copy_directory(source_dir, target_dir)
time.sleep(0.5)
# Check that the target files exist and are the same as the source
# files.
self.assertTrue(os.path.exists(target_dir))
self.assertTrue(os.path.exists(target_file))
with open(target_file, "r") as f:
self.assertEqual(f.read(), source_text)
# Remove the relevant files to clean up.
remove_temporary_files()
ray.worker.cleanup()
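# ResourcesTest exercises the resource-aware scheduler: CPU and GPU limits,
# CUDA_VISIBLE_DEVICES assignment for tasks and actors, placement across
# multiple local schedulers, and custom resources.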
class ResourcesTest(unittest.TestCase):
def testResourceConstraints(self):
num_workers = 20
ray.init(num_workers=num_workers, num_cpus=10, num_gpus=2)
# Attempt to wait for all of the workers to start up.
ray.worker.global_worker.run_function_on_all_workers(
lambda worker_info: sys.path.append(worker_info["counter"]))
@ray.remote(num_cpus=0)
def get_worker_id():
time.sleep(1)
return sys.path[-1]
while True:
if len(
set(
ray.get([
get_worker_id.remote() for _ in range(num_workers)
]))) == num_workers:
break
time_buffer = 0.3
# At most 10 copies of this can run at once.
@ray.remote(num_cpus=1)
def f(n):
time.sleep(n)
start_time = time.time()
ray.get([f.remote(0.5) for _ in range(10)])
duration = time.time() - start_time
self.assertLess(duration, 0.5 + time_buffer)
self.assertGreater(duration, 0.5)
start_time = time.time()
ray.get([f.remote(0.5) for _ in range(11)])
duration = time.time() - start_time
self.assertLess(duration, 1 + time_buffer)
self.assertGreater(duration, 1)
@ray.remote(num_cpus=3)
def f(n):
time.sleep(n)
start_time = time.time()
ray.get([f.remote(0.5) for _ in range(3)])
duration = time.time() - start_time
self.assertLess(duration, 0.5 + time_buffer)
self.assertGreater(duration, 0.5)
start_time = time.time()
ray.get([f.remote(0.5) for _ in range(4)])
duration = time.time() - start_time
self.assertLess(duration, 1 + time_buffer)
self.assertGreater(duration, 1)
@ray.remote(num_gpus=1)
def f(n):
time.sleep(n)
start_time = time.time()
ray.get([f.remote(0.5) for _ in range(2)])
duration = time.time() - start_time
self.assertLess(duration, 0.5 + time_buffer)
self.assertGreater(duration, 0.5)
start_time = time.time()
ray.get([f.remote(0.5) for _ in range(3)])
duration = time.time() - start_time
self.assertLess(duration, 1 + time_buffer)
self.assertGreater(duration, 1)
start_time = time.time()
ray.get([f.remote(0.5) for _ in range(4)])
duration = time.time() - start_time
self.assertLess(duration, 1 + time_buffer)
self.assertGreater(duration, 1)
ray.worker.cleanup()
def testMultiResourceConstraints(self):
num_workers = 20
ray.init(num_workers=num_workers, num_cpus=10, num_gpus=10)
# Attempt to wait for all of the workers to start up.
ray.worker.global_worker.run_function_on_all_workers(
lambda worker_info: sys.path.append(worker_info["counter"]))
@ray.remote(num_cpus=0)
def get_worker_id():
time.sleep(1)
return sys.path[-1]
while True:
if len(
set(
ray.get([
get_worker_id.remote() for _ in range(num_workers)
]))) == num_workers:
break
@ray.remote(num_cpus=1, num_gpus=9)
def f(n):
time.sleep(n)
@ray.remote(num_cpus=9, num_gpus=1)
def g(n):
time.sleep(n)
time_buffer = 0.3
start_time = time.time()
ray.get([f.remote(0.5), g.remote(0.5)])
duration = time.time() - start_time
self.assertLess(duration, 0.5 + time_buffer)
self.assertGreater(duration, 0.5)
start_time = time.time()
ray.get([f.remote(0.5), f.remote(0.5)])
duration = time.time() - start_time
self.assertLess(duration, 1 + time_buffer)
self.assertGreater(duration, 1)
start_time = time.time()
ray.get([g.remote(0.5), g.remote(0.5)])
duration = time.time() - start_time
self.assertLess(duration, 1 + time_buffer)
self.assertGreater(duration, 1)
start_time = time.time()
ray.get([f.remote(0.5), f.remote(0.5), g.remote(0.5), g.remote(0.5)])
duration = time.time() - start_time
self.assertLess(duration, 1 + time_buffer)
self.assertGreater(duration, 1)
ray.worker.cleanup()
def testGPUIDs(self):
num_gpus = 10
ray.init(num_cpus=10, num_gpus=num_gpus)
@ray.remote(num_gpus=0)
def f0():
time.sleep(0.1)
gpu_ids = ray.get_gpu_ids()
assert len(gpu_ids) == 0
assert (os.environ["CUDA_VISIBLE_DEVICES"] ==
",".join([str(i) for i in gpu_ids]))
for gpu_id in gpu_ids:
assert gpu_id in range(num_gpus)
return gpu_ids
@ray.remote(num_gpus=1)
def f1():
time.sleep(0.1)
gpu_ids = ray.get_gpu_ids()
assert len(gpu_ids) == 1
assert (os.environ["CUDA_VISIBLE_DEVICES"] ==
",".join([str(i) for i in gpu_ids]))
for gpu_id in gpu_ids:
assert gpu_id in range(num_gpus)
return gpu_ids
@ray.remote(num_gpus=2)
def f2():
time.sleep(0.1)
gpu_ids = ray.get_gpu_ids()
assert len(gpu_ids) == 2
assert (os.environ["CUDA_VISIBLE_DEVICES"] ==
",".join([str(i) for i in gpu_ids]))
for gpu_id in gpu_ids:
assert gpu_id in range(num_gpus)
return gpu_ids
@ray.remote(num_gpus=3)
def f3():
time.sleep(0.1)
gpu_ids = ray.get_gpu_ids()
assert len(gpu_ids) == 3
assert (os.environ["CUDA_VISIBLE_DEVICES"] ==
",".join([str(i) for i in gpu_ids]))
for gpu_id in gpu_ids:
assert gpu_id in range(num_gpus)
return gpu_ids
@ray.remote(num_gpus=4)
def f4():
time.sleep(0.1)
gpu_ids = ray.get_gpu_ids()
assert len(gpu_ids) == 4
assert (os.environ["CUDA_VISIBLE_DEVICES"] ==
",".join([str(i) for i in gpu_ids]))
for gpu_id in gpu_ids:
assert gpu_id in range(num_gpus)
return gpu_ids
@ray.remote(num_gpus=5)
def f5():
time.sleep(0.1)
gpu_ids = ray.get_gpu_ids()
assert len(gpu_ids) == 5
assert (os.environ["CUDA_VISIBLE_DEVICES"] ==
",".join([str(i) for i in gpu_ids]))
for gpu_id in gpu_ids:
assert gpu_id in range(num_gpus)
return gpu_ids
list_of_ids = ray.get([f0.remote() for _ in range(10)])
self.assertEqual(list_of_ids, 10 * [[]])
list_of_ids = ray.get([f1.remote() for _ in range(10)])
set_of_ids = set([tuple(gpu_ids) for gpu_ids in list_of_ids])
self.assertEqual(set_of_ids, set([(i, ) for i in range(10)]))
list_of_ids = ray.get([f2.remote(), f4.remote(), f4.remote()])
all_ids = [gpu_id for gpu_ids in list_of_ids for gpu_id in gpu_ids]
self.assertEqual(set(all_ids), set(range(10)))
remaining = [f5.remote() for _ in range(20)]
for _ in range(10):
t1 = time.time()
ready, remaining = ray.wait(remaining, num_returns=2)
t2 = time.time()
            # There are only 10 GPUs, and each f5 task uses 5 GPUs, so only 2
            # tasks can be scheduled at a given time. If we wait for 2 tasks
            # to finish, it should therefore take at least 0.1 seconds for
            # each pair of tasks to finish.
self.assertGreater(t2 - t1, 0.09)
list_of_ids = ray.get(ready)
all_ids = [gpu_id for gpu_ids in list_of_ids for gpu_id in gpu_ids]
self.assertEqual(set(all_ids), set(range(10)))
# Test that actors have CUDA_VISIBLE_DEVICES set properly.
@ray.remote
class Actor0(object):
def __init__(self):
gpu_ids = ray.get_gpu_ids()
assert len(gpu_ids) == 0
assert (os.environ["CUDA_VISIBLE_DEVICES"] ==
",".join([str(i) for i in gpu_ids]))
# Set self.x to make sure that we got here.
self.x = 1
def test(self):
gpu_ids = ray.get_gpu_ids()
assert len(gpu_ids) == 0
assert (os.environ["CUDA_VISIBLE_DEVICES"] ==
",".join([str(i) for i in gpu_ids]))
return self.x
@ray.remote(num_gpus=1)
class Actor1(object):
def __init__(self):
gpu_ids = ray.get_gpu_ids()
assert len(gpu_ids) == 1
assert (os.environ["CUDA_VISIBLE_DEVICES"] ==
",".join([str(i) for i in gpu_ids]))
# Set self.x to make sure that we got here.
self.x = 1
def test(self):
gpu_ids = ray.get_gpu_ids()
assert len(gpu_ids) == 1
assert (os.environ["CUDA_VISIBLE_DEVICES"] ==
",".join([str(i) for i in gpu_ids]))
return self.x
a0 = Actor0.remote()
ray.get(a0.test.remote())
a1 = Actor1.remote()
ray.get(a1.test.remote())
ray.worker.cleanup()
def testMultipleLocalSchedulers(self):
# This test will define a bunch of tasks that can only be assigned to
# specific local schedulers, and we will check that they are assigned
# to the correct local schedulers.
address_info = ray.worker._init(
start_ray_local=True,
num_local_schedulers=3,
num_workers=1,
num_cpus=[100, 5, 10],
num_gpus=[0, 5, 1])
# Define a bunch of remote functions that all return the socket name of
# the plasma store. Since there is a one-to-one correspondence between
# plasma stores and local schedulers (at least right now), this can be
# used to identify which local scheduler the task was assigned to.
# This must be run on the zeroth local scheduler.
@ray.remote(num_cpus=11)
def run_on_0():
return ray.worker.global_worker.plasma_client.store_socket_name
# This must be run on the first local scheduler.
@ray.remote(num_gpus=2)
def run_on_1():
return ray.worker.global_worker.plasma_client.store_socket_name
# This must be run on the second local scheduler.
@ray.remote(num_cpus=6, num_gpus=1)
def run_on_2():
return ray.worker.global_worker.plasma_client.store_socket_name
# This can be run anywhere.
@ray.remote(num_cpus=0, num_gpus=0)
def run_on_0_1_2():
return ray.worker.global_worker.plasma_client.store_socket_name
# This must be run on the first or second local scheduler.
@ray.remote(num_gpus=1)
def run_on_1_2():
return ray.worker.global_worker.plasma_client.store_socket_name
# This must be run on the zeroth or second local scheduler.
@ray.remote(num_cpus=8)
def run_on_0_2():
return ray.worker.global_worker.plasma_client.store_socket_name
def run_lots_of_tasks():
names = []
results = []
for i in range(100):
index = np.random.randint(6)
if index == 0:
names.append("run_on_0")
results.append(run_on_0.remote())
elif index == 1:
names.append("run_on_1")
results.append(run_on_1.remote())
elif index == 2:
names.append("run_on_2")
results.append(run_on_2.remote())
elif index == 3:
names.append("run_on_0_1_2")
results.append(run_on_0_1_2.remote())
elif index == 4:
names.append("run_on_1_2")
results.append(run_on_1_2.remote())
elif index == 5:
names.append("run_on_0_2")
results.append(run_on_0_2.remote())
return names, results
store_names = [
object_store_address.name
for object_store_address in address_info["object_store_addresses"]
]
def validate_names_and_results(names, results):
for name, result in zip(names, ray.get(results)):
if name == "run_on_0":
self.assertIn(result, [store_names[0]])
elif name == "run_on_1":
self.assertIn(result, [store_names[1]])
elif name == "run_on_2":
self.assertIn(result, [store_names[2]])
elif name == "run_on_0_1_2":
self.assertIn(result, [
store_names[0], store_names[1], store_names[2]
])
elif name == "run_on_1_2":
self.assertIn(result, [store_names[1], store_names[2]])
elif name == "run_on_0_2":
self.assertIn(result, [store_names[0], store_names[2]])
else:
raise Exception("This should be unreachable.")
self.assertEqual(set(ray.get(results)), set(store_names))
names, results = run_lots_of_tasks()
validate_names_and_results(names, results)
# Make sure the same thing works when this is nested inside of a task.
@ray.remote
def run_nested1():
names, results = run_lots_of_tasks()
return names, results
@ray.remote
def run_nested2():
names, results = ray.get(run_nested1.remote())
return names, results
names, results = ray.get(run_nested2.remote())
validate_names_and_results(names, results)
ray.worker.cleanup()
def testCustomResources(self):
ray.worker._init(
start_ray_local=True,
num_local_schedulers=2,
num_cpus=3,
num_custom_resource=[0, 1])
@ray.remote
def f():
time.sleep(0.001)
return ray.worker.global_worker.plasma_client.store_socket_name
@ray.remote(num_custom_resource=1)
def g():
time.sleep(0.001)
return ray.worker.global_worker.plasma_client.store_socket_name
@ray.remote(num_custom_resource=1)
def h():
ray.get([f.remote() for _ in range(5)])
return ray.worker.global_worker.plasma_client.store_socket_name
# The f tasks should be scheduled on both local schedulers.
self.assertEqual(len(set(ray.get([f.remote() for _ in range(50)]))), 2)
local_plasma = ray.worker.global_worker.plasma_client.store_socket_name
# The g tasks should be scheduled only on the second local scheduler.
local_scheduler_ids = set(ray.get([g.remote() for _ in range(50)]))
self.assertEqual(len(local_scheduler_ids), 1)
self.assertNotEqual(list(local_scheduler_ids)[0], local_plasma)
# Make sure that resource bookkeeping works when a task that uses a
# custom resources gets blocked.
ray.get([h.remote() for _ in range(5)])
ray.worker.cleanup()
def testInfiniteCustomResource(self):
# Make sure that -1 corresponds to an infinite resource capacity.
ray.init(num_custom_resource=-1)
def f():
return 1
ray.get(ray.remote(num_custom_resource=0)(f).remote())
ray.get(ray.remote(num_custom_resource=1)(f).remote())
ray.get(ray.remote(num_custom_resource=2)(f).remote())
ray.get(ray.remote(num_custom_resource=4)(f).remote())
ray.get(ray.remote(num_custom_resource=8)(f).remote())
ray.get(ray.remote(num_custom_resource=(10**10))(f).remote())
ray.worker.cleanup()
class WorkerPoolTests(unittest.TestCase):
def tearDown(self):
ray.worker.cleanup()
def testNoWorkers(self):
ray.init(num_workers=0)
@ray.remote
def f():
return 1
# Make sure we can call a remote function. This will require starting a
# new worker.
ray.get(f.remote())
ray.get([f.remote() for _ in range(100)])
def testBlockingTasks(self):
ray.init(num_workers=1)
@ray.remote
def f(i, j):
return (i, j)
@ray.remote
def g(i):
# Each instance of g submits and blocks on the result of another
# remote task.
object_ids = [f.remote(i, j) for j in range(10)]
return ray.get(object_ids)
ray.get([g.remote(i) for i in range(100)])
@ray.remote
def _sleep(i):
time.sleep(1)
return (i)
@ray.remote
def sleep():
# Each instance of sleep submits and blocks on the result of
# another remote task, which takes one second to execute.
ray.get([_sleep.remote(i) for i in range(10)])
ray.get(sleep.remote())
ray.worker.cleanup()
def testMaxCallTasks(self):
ray.init(num_cpus=1)
@ray.remote(max_calls=1)
def f():
return os.getpid()
pid = ray.get(f.remote())
ray.test.test_utils.wait_for_pid_to_exit(pid)
@ray.remote(max_calls=2)
def f():
return os.getpid()
pid1 = ray.get(f.remote())
pid2 = ray.get(f.remote())
self.assertEqual(pid1, pid2)
ray.test.test_utils.wait_for_pid_to_exit(pid1)
ray.worker.cleanup()
class SchedulingAlgorithm(unittest.TestCase):
def attempt_to_load_balance(self,
remote_function,
args,
total_tasks,
num_local_schedulers,
minimum_count,
num_attempts=20):
attempts = 0
while attempts < num_attempts:
locations = ray.get(
[remote_function.remote(*args) for _ in range(total_tasks)])
names = set(locations)
counts = [locations.count(name) for name in names]
print("Counts are {}.".format(counts))
if (len(names) == num_local_schedulers
and all([count >= minimum_count for count in counts])):
break
attempts += 1
self.assertLess(attempts, num_attempts)
def testLoadBalancing(self):
# This test ensures that tasks are being assigned to all local
# schedulers in a roughly equal manner.
num_local_schedulers = 3
num_cpus = 7
ray.worker._init(
start_ray_local=True,
num_local_schedulers=num_local_schedulers,
num_cpus=num_cpus)
@ray.remote
def f():
time.sleep(0.001)
return ray.worker.global_worker.plasma_client.store_socket_name
self.attempt_to_load_balance(f, [], 100, num_local_schedulers, 25)
self.attempt_to_load_balance(f, [], 1000, num_local_schedulers, 250)
ray.worker.cleanup()
def testLoadBalancingWithDependencies(self):
# This test ensures that tasks are being assigned to all local
# schedulers in a roughly equal manner even when the tasks have
# dependencies.
num_workers = 3
num_local_schedulers = 3
ray.worker._init(
start_ray_local=True,
num_workers=num_workers,
num_local_schedulers=num_local_schedulers)
@ray.remote
def f(x):
return ray.worker.global_worker.plasma_client.store_socket_name
# This object will be local to one of the local schedulers. Make sure
# this doesn't prevent tasks from being scheduled on other local
# schedulers.
x = ray.put(np.zeros(1000000))
self.attempt_to_load_balance(f, [x], 100, num_local_schedulers, 25)
ray.worker.cleanup()
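# Helpers that poll the global state until the task table or object table
# reaches the expected size, or raise after a timeout.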
def wait_for_num_tasks(num_tasks, timeout=10):
start_time = time.time()
while time.time() - start_time < timeout:
if len(ray.global_state.task_table()) >= num_tasks:
return
time.sleep(0.1)
raise Exception("Timed out while waiting for global state.")
def wait_for_num_objects(num_objects, timeout=10):
start_time = time.time()
while time.time() - start_time < timeout:
if len(ray.global_state.object_table()) >= num_objects:
return
time.sleep(0.1)
raise Exception("Timed out while waiting for global state.")
class GlobalStateAPI(unittest.TestCase):
def testGlobalStateAPI(self):
with self.assertRaises(Exception):
ray.global_state.object_table()
with self.assertRaises(Exception):
ray.global_state.task_table()
with self.assertRaises(Exception):
ray.global_state.client_table()
with self.assertRaises(Exception):
ray.global_state.function_table()
with self.assertRaises(Exception):
ray.global_state.log_files()
ray.init()
self.assertEqual(ray.global_state.object_table(), dict())
ID_SIZE = 20
driver_id = ray.experimental.state.binary_to_hex(
ray.worker.global_worker.worker_id)
driver_task_id = ray.experimental.state.binary_to_hex(
ray.worker.global_worker.current_task_id.id())
# One task is put in the task table which corresponds to this driver.
wait_for_num_tasks(1)
task_table = ray.global_state.task_table()
self.assertEqual(len(task_table), 1)
self.assertEqual(driver_task_id, list(task_table.keys())[0])
self.assertEqual(task_table[driver_task_id]["State"],
ray.experimental.state.TASK_STATUS_RUNNING)
self.assertEqual(task_table[driver_task_id]["TaskSpec"]["TaskID"],
driver_task_id)
self.assertEqual(task_table[driver_task_id]["TaskSpec"]["ActorID"],
ID_SIZE * "ff")
self.assertEqual(task_table[driver_task_id]["TaskSpec"]["Args"], [])
self.assertEqual(task_table[driver_task_id]["TaskSpec"]["DriverID"],
driver_id)
self.assertEqual(task_table[driver_task_id]["TaskSpec"]["FunctionID"],
ID_SIZE * "ff")
self.assertEqual(
(task_table[driver_task_id]["TaskSpec"]["ReturnObjectIDs"]), [])
client_table = ray.global_state.client_table()
node_ip_address = ray.worker.global_worker.node_ip_address
self.assertEqual(len(client_table[node_ip_address]), 3)
manager_client = [
c for c in client_table[node_ip_address]
if c["ClientType"] == "plasma_manager"
][0]
@ray.remote
def f(*xs):
return 1
x_id = ray.put(1)
result_id = f.remote(1, "hi", x_id)
# Wait for one additional task to complete.
start_time = time.time()
while time.time() - start_time < 10:
wait_for_num_tasks(1 + 1)
task_table = ray.global_state.task_table()
self.assertEqual(len(task_table), 1 + 1)
task_id_set = set(task_table.keys())
task_id_set.remove(driver_task_id)
task_id = list(task_id_set)[0]
if task_table[task_id]["State"] == "DONE":
break
time.sleep(0.1)
function_table = ray.global_state.function_table()
task_spec = task_table[task_id]["TaskSpec"]
self.assertEqual(task_spec["ActorID"], ID_SIZE * "ff")
self.assertEqual(task_spec["Args"], [1, "hi", x_id])
self.assertEqual(task_spec["DriverID"], driver_id)
self.assertEqual(task_spec["ReturnObjectIDs"], [result_id])
function_table_entry = function_table[task_spec["FunctionID"]]
self.assertEqual(function_table_entry["Name"], "__main__.f")
self.assertEqual(function_table_entry["DriverID"], driver_id)
self.assertEqual(function_table_entry["Module"], "__main__")
self.assertEqual(task_table[task_id],
ray.global_state.task_table(task_id))
# Wait for two objects, one for the x_id and one for result_id.
wait_for_num_objects(2)
def wait_for_object_table():
timeout = 10
start_time = time.time()
while time.time() - start_time < timeout:
object_table = ray.global_state.object_table()
tables_ready = (
object_table[x_id]["ManagerIDs"] is not None
and object_table[result_id]["ManagerIDs"] is not None)
if tables_ready:
return
time.sleep(0.1)
raise Exception("Timed out while waiting for object table to "
"update.")
# Wait for the object table to be updated.
wait_for_object_table()
object_table = ray.global_state.object_table()
self.assertEqual(len(object_table), 2)
self.assertEqual(object_table[x_id]["IsPut"], True)
self.assertEqual(object_table[x_id]["TaskID"], driver_task_id)
self.assertEqual(object_table[x_id]["ManagerIDs"],
[manager_client["DBClientID"]])
self.assertEqual(object_table[result_id]["IsPut"], False)
self.assertEqual(object_table[result_id]["TaskID"], task_id)
self.assertEqual(object_table[result_id]["ManagerIDs"],
[manager_client["DBClientID"]])
self.assertEqual(object_table[x_id],
ray.global_state.object_table(x_id))
self.assertEqual(object_table[result_id],
ray.global_state.object_table(result_id))
ray.worker.cleanup()
def testLogFileAPI(self):
ray.init(redirect_output=True)
message = "unique message"
@ray.remote
def f():
print(message)
# The call to sys.stdout.flush() seems to be necessary when using
# the system Python 2.7 on Ubuntu.
sys.stdout.flush()
ray.get(f.remote())
# Make sure that the message appears in the log files.
start_time = time.time()
found_message = False
while time.time() - start_time < 10:
log_files = ray.global_state.log_files()
for ip, innerdict in log_files.items():
for filename, contents in innerdict.items():
contents_str = "".join(contents)
if message in contents_str:
found_message = True
if found_message:
break
time.sleep(0.1)
self.assertEqual(found_message, True)
ray.worker.cleanup()
def testTaskProfileAPI(self):
ray.init(redirect_output=True)
@ray.remote
def f():
return 1
num_calls = 5
[f.remote() for _ in range(num_calls)]
# Make sure the event log has the correct number of events.
start_time = time.time()
while time.time() - start_time < 10:
profiles = ray.global_state.task_profiles(
100, start=0, end=time.time())
limited_profiles = ray.global_state.task_profiles(
1, start=0, end=time.time())
if len(profiles) == num_calls and len(limited_profiles) == 1:
break
time.sleep(0.1)
self.assertEqual(len(profiles), num_calls)
self.assertEqual(len(limited_profiles), 1)
# Make sure that each entry is properly formatted.
for task_id, data in profiles.items():
self.assertIn("execute_start", data)
self.assertIn("execute_end", data)
self.assertIn("get_arguments_start", data)
self.assertIn("get_arguments_end", data)
self.assertIn("store_outputs_start", data)
self.assertIn("store_outputs_end", data)
ray.worker.cleanup()
def testWorkers(self):
num_workers = 3
ray.init(
redirect_output=True,
num_cpus=num_workers,
num_workers=num_workers)
@ray.remote
def f():
return id(ray.worker.global_worker)
# Wait until all of the workers have started.
worker_ids = set()
while len(worker_ids) != num_workers:
worker_ids = set(ray.get([f.remote() for _ in range(10)]))
worker_info = ray.global_state.workers()
self.assertEqual(len(worker_info), num_workers)
for worker_id, info in worker_info.items():
self.assertEqual(info["node_ip_address"], "127.0.0.1")
self.assertIn("local_scheduler_socket", info)
self.assertIn("plasma_manager_socket", info)
self.assertIn("plasma_store_socket", info)
self.assertIn("stderr_file", info)
self.assertIn("stdout_file", info)
ray.worker.cleanup()
def testDumpTraceFile(self):
ray.init(redirect_output=True)
@ray.remote
def f():
return 1
@ray.remote
class Foo(object):
def __init__(self):
pass
def method(self):
pass
ray.get([f.remote() for _ in range(10)])
actors = [Foo.remote() for _ in range(5)]
ray.get([actor.method.remote() for actor in actors])
ray.get([actor.method.remote() for actor in actors])
path = os.path.join("/tmp/ray_test_trace")
task_info = ray.global_state.task_profiles(
100, start=0, end=time.time())
ray.global_state.dump_catapult_trace(path, task_info)
# TODO(rkn): This test is not perfect because it does not verify that
# the visualization actually renders (e.g., the context of the dumped
# trace could be malformed).
ray.worker.cleanup()
if __name__ == "__main__":
unittest.main(verbosity=2)
| {
"repo_name": "Wapaul1/ray",
"path": "test/runtest.py",
"copies": "1",
"size": "63837",
"license": "apache-2.0",
"hash": 6193776593717088000,
"line_mean": 31.9566339701,
"line_max": 79,
"alpha_frac": 0.5427573351,
"autogenerated": false,
"ratio": 3.73709167544784,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.477984901054784,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import os
import re
import subprocess
import sys
import decimal
from operator import attrgetter
from itertools import chain
from collections import Iterator
from datetime import datetime, date
from distutils.spawn import find_executable
import pandas as pd
import sqlalchemy as sa
from sqlalchemy import inspect
from sqlalchemy.ext.compiler import compiles
from sqlalchemy import event
from sqlalchemy.schema import CreateSchema
from multipledispatch import MDNotImplementedError
import datashape
from datashape import DataShape, Record, Option, var, dshape, Map
from datashape.predicates import isdimension, isrecord, isscalar
from datashape import discover, datetime_, date_, float64, int64, int_, string
from datashape import float32
from datashape.dispatch import dispatch
from toolz import (partition_all, keyfilter, memoize, valfilter, identity,
concat, curry, merge)
from toolz.curried import pluck, map
from ..compatibility import unicode
from ..utils import keywords, ignoring, iter_except, filter_kwargs
from ..convert import convert, ooc_types
from ..append import append
from ..resource import resource
from ..chunks import Chunks
from .csv import CSV
base = int, float, datetime, date, bool, str, decimal.Decimal
# http://docs.sqlalchemy.org/en/latest/core/types.html
types = {
'int64': sa.BigInteger,
'int32': sa.Integer,
'int': sa.Integer,
'int16': sa.SmallInteger,
'float32': sa.REAL,
'float64': sa.FLOAT,
'float': sa.FLOAT,
'real': sa.FLOAT,
'string': sa.Text,
'date': sa.Date,
'time': sa.Time,
'datetime': sa.DateTime,
'bool': sa.Boolean,
"timedelta[unit='D']": sa.Interval(second_precision=0, day_precision=9),
"timedelta[unit='h']": sa.Interval(second_precision=0, day_precision=0),
"timedelta[unit='m']": sa.Interval(second_precision=0, day_precision=0),
"timedelta[unit='s']": sa.Interval(second_precision=0, day_precision=0),
"timedelta[unit='ms']": sa.Interval(second_precision=3, day_precision=0),
"timedelta[unit='us']": sa.Interval(second_precision=6, day_precision=0),
"timedelta[unit='ns']": sa.Interval(second_precision=9, day_precision=0),
# ??: sa.types.LargeBinary,
}
revtypes = dict(map(reversed, types.items()))
revtypes.update({
sa.DATETIME: datetime_,
sa.TIMESTAMP: datetime_,
sa.FLOAT: float64,
sa.DATE: date_,
    sa.BIGINT: int64,
    sa.INTEGER: int_,
sa.types.NullType: string,
sa.REAL: float32,
sa.Float: float64,
sa.Float(precision=24): float32,
sa.Float(precision=53): float64,
})
# interval types are special cased in discover_typeengine so remove them from
# revtypes
revtypes = valfilter(lambda x: not isinstance(x, sa.Interval), revtypes)
units_of_power = {
0: 's',
3: 'ms',
6: 'us',
9: 'ns'
}
# these aren't loaded by sqlalchemy by default
sa.dialects.registry.load('oracle')
sa.dialects.registry.load('postgresql')
def getbind(t, bind):
if bind is None:
return t.bind
if isinstance(bind, sa.engine.base.Engine):
return bind
return sa.create_engine(bind)
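# getbind normalizes the three ways callers can say where to execute: None
# (use the bind already attached to the table or selectable), an existing
# Engine, or a connection URI string that we turn into an Engine.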
def batch(sel, chunksize=10000, bind=None):
"""Execute `sel`, streaming row at a time and fetching from the database in
batches of size `chunksize`.
Parameters
----------
sel : sa.sql.Selectable
Selectable to execute
chunksize : int, optional, default 10000
Number of rows to fetch from the database
"""
def rowterator(sel, chunksize=chunksize):
with getbind(sel, bind).connect() as conn:
result = conn.execute(sel)
yield result.keys()
for rows in iter_except(curry(result.fetchmany, size=chunksize),
sa.exc.ResourceClosedError):
if rows:
yield rows
else:
return
terator = rowterator(sel)
return next(terator), concat(terator)
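# A minimal usage sketch (assuming `accounts` is a sa.Table bound to an
# engine); batch() returns the column names plus a lazy iterator over all
# rows, fetched `chunksize` at a time:
#
#     columns, rows = batch(sa.select([accounts]), chunksize=500)
#     for row in rows:
#         handle(row)  # `handle` is a hypothetical callback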
@discover.register(sa.dialects.postgresql.base.INTERVAL)
def discover_postgresql_interval(t):
return discover(sa.Interval(day_precision=0, second_precision=t.precision))
@discover.register(sa.dialects.oracle.base.INTERVAL)
def discover_oracle_interval(t):
return discover(t.adapt(sa.Interval))
@discover.register(sa.sql.type_api.TypeEngine)
def discover_typeengine(typ):
if isinstance(typ, sa.Interval):
if typ.second_precision is None and typ.day_precision is None:
return datashape.TimeDelta(unit='us')
elif typ.second_precision == 0 and typ.day_precision == 0:
return datashape.TimeDelta(unit='s')
if typ.second_precision in units_of_power and not typ.day_precision:
units = units_of_power[typ.second_precision]
elif typ.day_precision > 0:
units = 'D'
else:
            raise ValueError('Cannot infer INTERVAL type with parameters '
'second_precision=%d, day_precision=%d' %
(typ.second_precision, typ.day_precision))
return datashape.TimeDelta(unit=units)
if typ in revtypes:
return dshape(revtypes[typ])[0]
if type(typ) in revtypes:
return revtypes[type(typ)]
if isinstance(typ, (sa.NUMERIC, sa.DECIMAL)):
return datashape.Decimal(precision=typ.precision, scale=typ.scale)
if isinstance(typ, (sa.String, sa.Unicode)):
return datashape.String(typ.length, typ.collation)
else:
for k, v in revtypes.items():
if isinstance(k, type) and (isinstance(typ, k) or
hasattr(typ, 'impl') and
isinstance(typ.impl, k)):
return v
if k == typ:
return v
raise NotImplementedError("No SQL-datashape match for type %s" % typ)
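# discover_typeengine resolves a SQLAlchemy type to a datashape in this
# order: special-cased Interval handling, an exact match in revtypes, the
# type's class in revtypes, parametrized NUMERIC/DECIMAL and String/Unicode
# types, and finally an isinstance scan over revtypes that also looks through
# a wrapped type's `impl` attribute.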
@discover.register(sa.ForeignKey, sa.sql.FromClause)
def discover_foreign_key_relationship(fk, parent, parent_measure=None):
if fk.column.table is not parent:
parent_measure = discover(fk.column.table).measure
return {fk.parent.name: Map(discover(fk.parent.type), parent_measure)}
@discover.register(sa.Column)
def discover_sqlalchemy_column(c):
meta = Option if c.nullable else identity
return Record([(c.name, meta(discover(c.type)))])
@discover.register(sa.sql.FromClause)
def discover_sqlalchemy_selectable(t):
    ordering = dict((c, i) for i, c in enumerate(t.columns.keys()))
records = list(sum([discover(c).parameters[0] for c in t.columns], ()))
fkeys = [discover(fkey, t, parent_measure=Record(records))
for fkey in t.foreign_keys]
for name, column in merge(*fkeys).items():
records[ordering[name]] = (name, column)
return var * Record(records)
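# Columns participating in a foreign key are re-described as Map types whose
# value is the measure of the referenced table, so consumers of the datashape
# can follow the relationship.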
@dispatch(sa.engine.base.Engine, str)
def discover(engine, tablename):
metadata = sa.MetaData(engine)
if tablename not in metadata.tables:
try:
metadata.reflect(engine,
views=metadata.bind.dialect.supports_views)
except NotImplementedError:
metadata.reflect(engine)
table = metadata.tables[tablename]
return discover(table)
@dispatch(sa.engine.base.Engine)
def discover(engine):
return discover(sa.MetaData(engine))
@dispatch(sa.MetaData)
def discover(metadata):
try:
metadata.reflect(views=metadata.bind.dialect.supports_views)
except NotImplementedError:
metadata.reflect()
pairs = []
for table in sorted(metadata.tables.values(), key=attrgetter('name')):
name = table.name
try:
pairs.append([name, discover(table)])
except sa.exc.CompileError as e:
print("Can not discover type of table %s.\n" % name +
"SQLAlchemy provided this error message:\n\t%s" % e.message +
"\nSkipping.")
except NotImplementedError as e:
print("Blaze does not understand a SQLAlchemy type.\n"
"Blaze provided the following error:\n\t%s" % "\n\t".join(e.args) +
"\nSkipping.")
return DataShape(Record(pairs))
@discover.register(sa.engine.RowProxy)
def discover_row_proxy(rp):
return Record(list(zip(rp.keys(), map(discover, rp.values()))))
def validate_foreign_keys(ds, foreign_keys):
    # Check that each requested foreign key names a field of the datashape,
    # that each such field is declared as a Map type, and that every Map
    # field is listed in foreign_keys.
for field in foreign_keys:
if field not in ds.measure.names:
raise TypeError('Requested foreign key field %r is not a field in '
'datashape %s' % (field, ds))
for field, typ in ds.measure.fields:
if field in foreign_keys and not isinstance(getattr(typ, 'ty', typ),
Map):
raise TypeError('Foreign key %s passed in but not a Map '
'datashape, got %s' % (field, typ))
if isinstance(typ, Map) and field not in foreign_keys:
raise TypeError('Map type %s found on column %s, but %r '
"wasn't found in %s" %
(typ, field, field, foreign_keys))
def dshape_to_table(name, ds, metadata=None, foreign_keys=None,
primary_key=None):
"""
Create a SQLAlchemy table from a datashape and a name
>>> dshape_to_table('bank', '{name: string, amount: int}') # doctest: +NORMALIZE_WHITESPACE
Table('bank', MetaData(bind=None),
Column('name', Text(), table=<bank>, nullable=False),
Column('amount', Integer(), table=<bank>, nullable=False),
schema=None)
"""
if isinstance(ds, str):
ds = dshape(ds)
if not isrecord(ds.measure):
raise TypeError('dshape measure must be a record type e.g., '
'"{a: int64, b: int64}". Input measure is %r' %
ds.measure)
if metadata is None:
metadata = sa.MetaData()
if foreign_keys is None:
foreign_keys = {}
validate_foreign_keys(ds, foreign_keys)
cols = dshape_to_alchemy(ds, primary_key=primary_key or frozenset())
cols.extend(sa.ForeignKeyConstraint([column_name], [referent])
for column_name, referent in foreign_keys.items())
t = sa.Table(name, metadata, *cols, schema=metadata.schema)
return attach_schema(t, t.schema)
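# Note that `foreign_keys` maps a column name to its referent, using the
# usual SQLAlchemy "table.column" form (the names are illustrative), and
# becomes a ForeignKeyConstraint, while `primary_key` is a collection of
# column names to mark as primary keys on the generated columns.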
@dispatch(object, str)
def create_from_datashape(o, ds, **kwargs):
return create_from_datashape(o, dshape(ds), **kwargs)
@dispatch(sa.engine.base.Engine, DataShape)
def create_from_datashape(engine, ds, schema=None, foreign_keys=None,
primary_key=None, **kwargs):
assert isrecord(ds), 'datashape must be Record type, got %s' % ds
metadata = sa.MetaData(engine, schema=schema)
for name, sub_ds in ds[0].dict.items():
t = dshape_to_table(name, sub_ds, metadata=metadata,
foreign_keys=foreign_keys,
primary_key=primary_key)
t.create()
return engine
def dshape_to_alchemy(dshape, primary_key=frozenset()):
"""
>>> dshape_to_alchemy('int')
<class 'sqlalchemy.sql.sqltypes.Integer'>
>>> dshape_to_alchemy('string')
<class 'sqlalchemy.sql.sqltypes.Text'>
>>> dshape_to_alchemy('{name: string, amount: int}')
[Column('name', Text(), table=None, nullable=False), Column('amount', Integer(), table=None, nullable=False)]
>>> dshape_to_alchemy('{name: ?string, amount: ?int}')
[Column('name', Text(), table=None), Column('amount', Integer(), table=None)]
"""
if isinstance(dshape, str):
dshape = datashape.dshape(dshape)
if isinstance(dshape, Map):
return dshape_to_alchemy(dshape.key.measure, primary_key=primary_key)
if isinstance(dshape, Option):
return dshape_to_alchemy(dshape.ty, primary_key=primary_key)
if str(dshape) in types:
return types[str(dshape)]
if isinstance(dshape, datashape.Record):
return [sa.Column(name,
dshape_to_alchemy(getattr(typ, 'ty', typ),
primary_key=primary_key),
primary_key=name in primary_key,
nullable=isinstance(typ[0], Option))
for name, typ in dshape.parameters[0]]
if isinstance(dshape, datashape.DataShape):
if isdimension(dshape[0]):
return dshape_to_alchemy(dshape[1], primary_key=primary_key)
else:
return dshape_to_alchemy(dshape[0], primary_key=primary_key)
if isinstance(dshape, datashape.String):
fixlen = dshape[0].fixlen
if fixlen is None:
return sa.TEXT
string_types = dict(U=sa.Unicode, A=sa.String)
assert dshape.encoding is not None
return string_types[dshape.encoding[0]](length=fixlen)
if isinstance(dshape, datashape.DateTime):
return sa.DATETIME(timezone=dshape.tz is not None)
if isinstance(dshape, datashape.Decimal):
return sa.NUMERIC(dshape.precision, dshape.scale)
raise NotImplementedError("No SQLAlchemy dtype match for datashape: %s"
% dshape)
@convert.register(Iterator, sa.Table, cost=300.0)
def sql_to_iterator(t, bind=None, **kwargs):
_, rows = batch(sa.select([t]), bind=bind)
return map(tuple, rows)
@convert.register(Iterator, sa.sql.Select, cost=300.0)
def select_to_iterator(sel, dshape=None, bind=None, **kwargs):
func = pluck(0) if dshape and isscalar(dshape.measure) else map(tuple)
_, rows = batch(sel, bind=bind)
return func(rows)
@convert.register(base, sa.sql.Select, cost=200.0)
def select_to_base(sel, dshape=None, bind=None, **kwargs):
if dshape is not None and not isscalar(dshape):
raise ValueError('dshape should be None or scalar, got %s' % dshape)
with getbind(sel, bind).connect() as conn:
return conn.execute(sel).scalar()
@append.register(sa.Table, Iterator)
def append_iterator_to_table(t, rows, dshape=None, bind=None, **kwargs):
assert not isinstance(t, type)
engine = getbind(t, bind)
if not t.exists(bind=engine):
t.create(bind=engine)
rows = iter(rows)
# We see if the sequence is of tuples or dicts
# If tuples then we coerce them to dicts
try:
row = next(rows)
except StopIteration:
return
rows = chain([row], rows)
if isinstance(row, (tuple, list)):
dshape = dshape and datashape.dshape(dshape)
if dshape and isinstance(dshape.measure, datashape.Record):
names = dshape.measure.names
if set(names) != set(discover(t).measure.names):
raise ValueError("Column names of incoming data don't match "
"column names of existing SQL table\n"
"Names in SQL table: %s\n"
"Names from incoming data: %s\n" %
(discover(t).measure.names, names))
else:
names = discover(t).measure.names
rows = (dict(zip(names, row)) for row in rows)
with engine.connect() as conn:
for chunk in partition_all(1000, rows): # TODO: 1000 is hardcoded
conn.execute(t.insert(), chunk)
return t
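# Rows arriving as tuples or lists are zipped into dicts keyed by the table's
# column names (taken from the provided dshape when given, otherwise from the
# table itself) and inserted in chunks of 1000 rows per execute call.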
@append.register(sa.Table, Chunks)
def append_chunks_to_sql_Table(t, c, **kwargs):
for item in c:
append(t, item, **kwargs)
return t
@append.register(sa.Table, object)
def append_anything_to_sql_Table(t, o, **kwargs):
return append(t, convert(Iterator, o, **kwargs), **kwargs)
@append.register(sa.Table, sa.Table)
def append_table_to_sql_Table(t, o, **kwargs):
s = sa.select([o])
return append(t, s, **kwargs)
@append.register(sa.Table, sa.sql.Select)
def append_select_statement_to_sql_Table(t, o, bind=None, **kwargs):
t_bind = getbind(t, bind)
o_bind = getbind(o, bind)
if t_bind != o_bind:
return append(
t,
convert(Iterator, o, bind=bind, **kwargs),
bind=bind,
**kwargs
)
bind = t_bind
assert bind.has_table(t.name, t.schema), \
'tables must come from the same database'
query = t.insert().from_select(o.columns.keys(), o)
with bind.connect() as conn:
conn.execute(query)
return t
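# When source and target share the same engine the copy happens entirely in
# the database via INSERT ... FROM SELECT; otherwise the rows are pulled
# through Python as an Iterator and appended normally.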
def should_create_schema(ddl, target, bind, tables=None, state=None,
checkfirst=None, **kwargs):
return ddl.element not in inspect(target.bind).get_schema_names()
def attach_schema(obj, schema):
if schema is not None:
ddl = CreateSchema(schema, quote=True)
event.listen(
obj,
'before_create',
ddl.execute_if(
callable_=should_create_schema,
dialect='postgresql'
)
)
return obj
@resource.register(r'(.*sql.*|oracle|redshift)(\+\w+)?://.+')
def resource_sql(uri, *args, **kwargs):
engine = sa.create_engine(uri, connect_args=kwargs.pop('connect_args', {}),
**filter_kwargs(sa.create_engine, kwargs))
ds = kwargs.pop('dshape', None)
schema = kwargs.pop('schema', None)
foreign_keys = kwargs.pop('foreign_keys', None)
primary_key = kwargs.pop('primary_key', None)
# we were also given a table name
if args and isinstance(args[0], (str, unicode)):
table_name, args = args[0], args[1:]
metadata = sa.MetaData(engine, schema=schema)
with ignoring(sa.exc.NoSuchTableError):
return attach_schema(
sa.Table(table_name, metadata, schema=schema,
autoload_with=engine),
schema
)
if ds:
t = dshape_to_table(table_name, ds, metadata=metadata,
foreign_keys=foreign_keys,
primary_key=primary_key)
t.create()
return t
else:
raise ValueError("Table does not exist and no dshape provided")
# We were not given a table name
if ds:
create_from_datashape(engine, ds, schema=schema,
foreign_keys=foreign_keys)
return engine
@resource.register('impala://.+')
def resource_impala(uri, *args, **kwargs):
try:
import impala.sqlalchemy
except ImportError:
raise ImportError("Please install or update `impyla` library")
return resource_sql(uri, *args, **kwargs)
@resource.register('monetdb://.+')
def resource_monet(uri, *args, **kwargs):
try:
import monetdb
except ImportError:
raise ImportError("Please install the `sqlalchemy_monetdb` library")
return resource_sql(uri, *args, **kwargs)
@resource.register('hive://.+')
def resource_hive(uri, *args, **kwargs):
try:
import pyhive
except ImportError:
raise ImportError("Please install the `PyHive` library.")
    pattern = r'hive://((?P<user>[a-zA-Z_]\w*)@)?(?P<host>[\w.]+)(:(?P<port>\d*))?(/(?P<database>\w*))?'
d = re.search(pattern, uri.split('::')[0]).groupdict()
defaults = {'port': '10000',
'user': 'hdfs',
'database': 'default'}
for k, v in d.items():
if not v:
d[k] = defaults[k]
if d['user']:
d['user'] += '@'
uri2 = 'hive://%(user)s%(host)s:%(port)s/%(database)s' % d
if '::' in uri:
uri2 += '::' + uri.split('::')[1]
return resource_sql(uri2, *args, **kwargs)
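# Illustrative sketch (hypothetical URI, not part of the original module):
# the normalization above fills in the default user, port and database, so
#
#   >>> resource('hive://myhost::mytable')                    # doctest: +SKIP
#
# is expanded to 'hive://hdfs@myhost:10000/default::mytable' before being
# handed off to resource_sql().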
ooc_types.add(sa.Table)
@dispatch(sa.Table)
def drop(table, bind=None):
bind = getbind(table, bind)
table.drop(bind=bind, checkfirst=True)
if table.exists(bind=bind):
raise ValueError('table %r dropped but still exists' % table.name)
@convert.register(pd.DataFrame, (sa.sql.Select, sa.sql.Selectable), cost=200.0)
def select_or_selectable_to_frame(el, bind=None, **kwargs):
columns, rows = batch(el, bind=bind)
row = next(rows, None)
if row is None:
return pd.DataFrame(columns=columns)
return pd.DataFrame(list(chain([tuple(row)], map(tuple, rows))),
columns=columns)
class CopyToCSV(sa.sql.expression.Executable, sa.sql.ClauseElement):
def __init__(
self,
element,
path,
delimiter=',',
quotechar='"',
lineterminator='\n',
escapechar='\\',
header=True,
na_value='',
encoding=None,
bind=None,
):
self.element = element
self.path = path
self.delimiter = delimiter
self.quotechar = quotechar
self.lineterminator = lineterminator
self._bind = bind = getbind(element, bind)
# mysql cannot write headers
self.header = header and bind.dialect.name != 'mysql'
self.escapechar = escapechar
self.na_value = na_value
self.encoding = encoding
@property
def bind(self):
return self._bind
@compiles(CopyToCSV, 'postgresql')
def compile_copy_to_csv_postgres(element, compiler, **kwargs):
selectable = element.element
return compiler.process(
sa.text(
"""COPY {0} TO :path
WITH (
FORMAT CSV,
HEADER :header,
DELIMITER :delimiter,
QUOTE :quotechar,
NULL :na_value,
ESCAPE :escapechar,
ENCODING :encoding
)
""".format(
compiler.preparer.format_table(selectable)
if isinstance(selectable, sa.Table)
else '({0})'.format(compiler.process(selectable))
)
).bindparams(
path=element.path,
header=element.header,
delimiter=element.delimiter,
quotechar=element.quotechar,
na_value=element.na_value,
escapechar=element.escapechar,
encoding=element.encoding or element.bind.execute(
'show client_encoding'
).scalar()
),
**kwargs
)
@compiles(CopyToCSV, 'mysql')
def compile_copy_to_csv_mysql(element, compiler, **kwargs):
selectable = element.element
return compiler.process(
sa.text(
"""{0} INTO OUTFILE :path
CHARACTER SET :encoding
FIELDS TERMINATED BY :delimiter
OPTIONALLY ENCLOSED BY :quotechar
ESCAPED BY :escapechar
LINES TERMINATED BY :lineterminator
""".format(
compiler.process(
selectable.select()
if isinstance(selectable, sa.Table) else selectable,
**kwargs
)
)
).bindparams(
path=element.path,
encoding=element.encoding or element.bind.execute(
'select @@character_set_client'
).scalar(),
delimiter=element.delimiter,
quotechar=element.quotechar,
escapechar=element.escapechar,
lineterminator=element.lineterminator
)
)
@compiles(CopyToCSV, 'sqlite')
def compile_copy_to_csv_sqlite(element, compiler, **kwargs):
if element.encoding is not None:
raise ValueError(
"'encoding' keyword argument not supported for "
"SQLite to CSV conversion"
)
if not find_executable('sqlite3'):
raise MDNotImplementedError("Could not find sqlite executable")
    # we are sending a SQL string directly to the SQLite process so we always
    # need to bind everything before sending it
kwargs['literal_binds'] = True
selectable = element.element
sql = compiler.process(
selectable.select() if isinstance(selectable, sa.Table) else selectable,
**kwargs
) + ';'
sql = re.sub(r'\s{2,}', ' ', re.sub(r'\s*\n\s*', ' ', sql)).encode(
sys.getfilesystemencoding() # we send bytes to the process
)
cmd = ['sqlite3', '-csv',
'-%sheader' % ('no' if not element.header else ''),
'-separator', element.delimiter,
selectable.bind.url.database]
with open(element.path, mode='at') as f:
subprocess.Popen(cmd, stdout=f, stdin=subprocess.PIPE).communicate(sql)
# This will be a no-op since we're doing the write during the compile
return ''
@append.register(CSV, sa.sql.Selectable)
def append_table_to_csv(csv, selectable, dshape=None, bind=None, **kwargs):
kwargs = keyfilter(keywords(CopyToCSV).__contains__,
merge(csv.dialect, kwargs))
stmt = CopyToCSV(
selectable,
os.path.abspath(csv.path),
bind=bind,
**kwargs
)
with getbind(selectable, bind).begin() as conn:
conn.execute(stmt)
return csv
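# Usage sketch (hypothetical URIs, not part of the original module): the
# CopyToCSV machinery above is what backs exporting a SQL selectable
# straight to a CSV file with odo, roughly
#
#   >>> from odo import odo                                   # doctest: +SKIP
#   >>> odo('postgresql://localhost/db::events', 'events.csv')  # doctest: +SKIP
#
# which compiles into COPY ... TO on PostgreSQL, INTO OUTFILE on MySQL, or
# an external sqlite3 invocation, depending on the bound dialect.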
try:
from .hdfs import HDFS
except ImportError:
pass
else:
@append.register(HDFS(CSV), sa.sql.Selectable)
def append_selectable_to_hdfs_csv(*args, **kwargs):
raise MDNotImplementedError()
| {
"repo_name": "cpcloud/odo",
"path": "odo/backends/sql.py",
"copies": "1",
"size": "25079",
"license": "bsd-3-clause",
"hash": 3755807654230929000,
"line_mean": 32.5729585007,
"line_max": 113,
"alpha_frac": 0.608038598,
"autogenerated": false,
"ratio": 3.7952481840193704,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49032867820193704,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import os
import re
import subprocess
import uuid
import mmap
from contextlib import closing
from functools import partial
from distutils.spawn import find_executable
import sqlalchemy as sa
from sqlalchemy.ext.compiler import compiles
from sqlalchemy.sql.elements import Executable, ClauseElement
from toolz import merge
from multipledispatch import MDNotImplementedError
from ..append import append
from ..convert import convert
from .csv import CSV, infer_header
from ..temp import Temp
from .aws import S3
from .sql import fullname
class CopyFromCSV(Executable, ClauseElement):
def __init__(self, element, csv, delimiter=',', header=None, na_value='',
lineterminator='\n', quotechar='"', escapechar='\\',
encoding='utf8', skiprows=0, **kwargs):
if not isinstance(element, sa.Table):
raise TypeError('element must be a sqlalchemy.Table instance')
self.element = element
self.csv = csv
self.delimiter = delimiter
self.header = (header if header is not None else
(csv.has_header
if csv.has_header is not None else infer_header(csv)))
self.na_value = na_value
self.lineterminator = lineterminator
self.quotechar = quotechar
self.escapechar = escapechar
self.encoding = encoding
self.skiprows = int(skiprows or self.header)
for k, v in kwargs.items():
setattr(self, k, v)
@property
def bind(self):
return self.element.bind
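# Usage sketch (hypothetical table and CSV objects, not part of the original
# module): CopyFromCSV is an executable clause element, so loading a CSV
# into an existing table looks roughly like
#
#   >>> stmt = CopyFromCSV(tbl, csv, delimiter=',')           # doctest: +SKIP
#   >>> with tbl.bind.begin() as conn:                        # doctest: +SKIP
#   ...     conn.execute(stmt)
#
# append_csv_to_sql_table() at the bottom of this module does exactly this
# after routing Redshift/S3 and Hive inputs appropriately.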
@compiles(CopyFromCSV, 'sqlite')
def compile_from_csv_sqlite(element, compiler, **kwargs):
if not find_executable('sqlite3'):
raise MDNotImplementedError("Could not find sqlite executable")
t = element.element
if not element.header:
csv = element.csv
else:
csv = Temp(CSV)('.%s' % uuid.uuid1())
assert csv.has_header, \
'SQLAlchemy element.header is True but CSV inferred no header'
# write to a temporary file after skipping the first line
chunksize = 1 << 24 # 16 MiB
lineterminator = element.lineterminator.encode(element.encoding)
with open(element.csv.path, 'rb') as f:
with closing(mmap.mmap(f.fileno(), 0, access=mmap.ACCESS_READ)) as mf:
index = mf.find(lineterminator)
if index == -1:
raise ValueError("'%s' not found" % lineterminator)
mf.seek(index + len(lineterminator)) # len because \r\n
with open(csv.path, 'wb') as g:
for chunk in iter(partial(mf.read, chunksize), b''):
g.write(chunk)
fullpath = os.path.abspath(csv.path).encode('unicode-escape').decode()
cmd = ['sqlite3',
'-nullvalue', repr(element.na_value),
'-separator', element.delimiter,
'-cmd', '.import "%s" %s' % (fullpath, t.name),
element.bind.url.database]
stdout, stderr = subprocess.Popen(cmd,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
stdin=subprocess.PIPE).communicate()
assert not stdout, \
'error: %s from command: %s' % (stdout, ' '.join(cmd))
return ''
@compiles(CopyFromCSV, 'mysql')
def compile_from_csv_mysql(element, compiler, **kwargs):
if element.na_value:
raise ValueError('MySQL does not support custom NULL values')
encoding = {'utf-8': 'utf8'}.get(element.encoding.lower(),
element.encoding or 'utf8')
escapechar = element.escapechar.encode('unicode-escape').decode()
lineterminator = element.lineterminator.encode('unicode-escape').decode()
result = r"""
LOAD DATA {local} INFILE '{path}'
INTO TABLE {0.element.name}
CHARACTER SET {encoding}
FIELDS
TERMINATED BY '{0.delimiter}'
ENCLOSED BY '{0.quotechar}'
ESCAPED BY '{escapechar}'
        LINES TERMINATED BY '{lineterminator}'
IGNORE {0.skiprows} LINES;
""".format(element,
path=os.path.abspath(element.csv.path),
local=getattr(element, 'local', ''),
encoding=encoding,
lineterminator=lineterminator,
escapechar=escapechar).strip()
return result
@compiles(CopyFromCSV, 'postgresql')
def compile_from_csv_postgres(element, compiler, **kwargs):
encoding = {'utf8': 'utf-8'}.get(element.encoding.lower(),
element.encoding or 'utf8')
if len(element.escapechar) != 1:
raise ValueError('postgres does not allow escapechar longer than 1 '
'byte')
statement = """
COPY {fullname} FROM '{path}'
(FORMAT CSV,
DELIMITER E'{0.delimiter}',
NULL '{0.na_value}',
QUOTE '{0.quotechar}',
ESCAPE '{0.escapechar}',
HEADER {header},
ENCODING '{encoding}')"""
return statement.format(element,
fullname=fullname(element.element, compiler),
path=os.path.abspath(element.csv.path),
header=str(element.header).upper(),
encoding=encoding).strip()
try:
import boto
from odo.backends.aws import S3
from redshift_sqlalchemy.dialect import CopyCommand
import sqlalchemy as sa
except ImportError:
pass
else:
@compiles(CopyFromCSV, 'redshift')
def compile_from_csv_redshift(element, compiler, **kwargs):
assert isinstance(element.csv, S3(CSV))
assert element.csv.path.startswith('s3://')
cfg = boto.Config()
aws_access_key_id = cfg.get('Credentials', 'aws_access_key_id')
aws_secret_access_key = cfg.get('Credentials', 'aws_secret_access_key')
options = dict(delimiter=element.delimiter,
ignore_header=int(element.header),
empty_as_null=True,
blanks_as_null=False,
compression=getattr(element, 'compression', ''))
        if getattr(element, 'schema_name', None) is None:
            # 'public' by default, this is a postgres convention
            schema_name = (element.element.schema or
                           sa.inspect(element.bind).default_schema_name)
        else:
            schema_name = element.schema_name
cmd = CopyCommand(schema_name=schema_name,
table_name=element.element.name,
data_location=element.csv.path,
access_key=aws_access_key_id,
secret_key=aws_secret_access_key,
options=options,
format='CSV')
return re.sub(r'\s+(;)', r'\1', re.sub(r'\s+', ' ', str(cmd))).strip()
@append.register(sa.Table, CSV)
def append_csv_to_sql_table(tbl, csv, **kwargs):
dialect = tbl.bind.dialect.name
# move things to a temporary S3 bucket if we're using redshift and we
# aren't already in S3
if dialect == 'redshift' and not isinstance(csv, S3(CSV)):
csv = convert(Temp(S3(CSV)), csv, **kwargs)
elif dialect != 'redshift' and isinstance(csv, S3(CSV)):
csv = convert(Temp(CSV), csv, has_header=csv.has_header, **kwargs)
elif dialect == 'hive':
from .ssh import SSH
return append(tbl, convert(Temp(SSH(CSV)), csv, **kwargs), **kwargs)
kwargs = merge(csv.dialect, kwargs)
stmt = CopyFromCSV(tbl, csv, **kwargs)
with tbl.bind.begin() as conn:
conn.execute(stmt)
return tbl
| {
"repo_name": "ywang007/odo",
"path": "odo/backends/sql_csv.py",
"copies": "1",
"size": "7714",
"license": "bsd-3-clause",
"hash": 5750614919694681000,
"line_mean": 37.1881188119,
"line_max": 82,
"alpha_frac": 0.5885403163,
"autogenerated": false,
"ratio": 4.142857142857143,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5231397459157143,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import os
import re
import sys
from os.path import isdir, isfile, join
from conda.compat import iteritems, PY3, text_type
from conda.utils import memoized, md5_file
import conda.config as cc
from conda.resolve import MatchSpec
from conda.cli.common import specs_from_url
from . import exceptions
try:
import yaml
from yaml import Loader, SafeLoader
except ImportError:
sys.exit('Error: could not import yaml (required to read meta.yaml '
'files of conda recipes)')
# Override the default string handling function to always return unicode
# objects (taken from StackOverflow)
def construct_yaml_str(self, node):
return self.construct_scalar(node)
Loader.add_constructor(u'tag:yaml.org,2002:str', construct_yaml_str)
SafeLoader.add_constructor(u'tag:yaml.org,2002:str', construct_yaml_str)
from conda_build.config import config
def ns_cfg():
    # Remember to update the docs if any of this changes
plat = cc.subdir
py = config.CONDA_PY
np = config.CONDA_NPY
pl = config.CONDA_PERL
for x in py, np:
assert isinstance(x, int), x
d = dict(
linux = plat.startswith('linux-'),
linux32 = bool(plat == 'linux-32'),
linux64 = bool(plat == 'linux-64'),
osx = plat.startswith('osx-'),
unix = plat.startswith(('linux-', 'osx-')),
win = plat.startswith('win-'),
win32 = bool(plat == 'win-32'),
win64 = bool(plat == 'win-64'),
pl = pl,
py = py,
py3k = bool(30 <= py < 40),
py2k = bool(20 <= py < 30),
py26 = bool(py == 26),
py27 = bool(py == 27),
py33 = bool(py == 33),
py34 = bool(py == 34),
np = np,
os = os,
environ = os.environ,
)
for machine in cc.non_x86_linux_machines:
d[machine] = bool(plat == 'linux-%s' % machine)
d.update(os.environ)
return d
sel_pat = re.compile(r'(.+?)\s*(#.*)?\[(.+)\](?(2).*)$')
def select_lines(data, namespace):
lines = []
for i, line in enumerate(data.splitlines()):
line = line.rstrip()
if line.lstrip().startswith('#'):
# Don't bother with comment only lines
continue
m = sel_pat.match(line)
if m:
cond = m.group(3)
try:
if eval(cond, namespace, {}):
lines.append(m.group(1))
except:
sys.exit('''\
Error: Invalid selector in meta.yaml line %d:
%s
''' % (i + 1, line))
continue
lines.append(line)
return '\n'.join(lines) + '\n'
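# Example sketch of the selector syntax handled above (hypothetical input,
# not part of the original module); ns_cfg() provides the real namespace:
#
#   >>> select_lines("always\nlinux_only  # [linux]\n", {'linux': False})
#   'always\n'
#
# Lines whose bracketed selector evaluates to False are dropped, and the
# selector comment is stripped from lines that are kept.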
@memoized
def yamlize(data):
try:
return yaml.load(data)
except yaml.parser.ParserError as e:
try:
import jinja2
jinja2
except ImportError:
raise exceptions.UnableToParseMissingJinja2(original=e)
raise exceptions.UnableToParse(original=e)
def parse(data):
data = select_lines(data, ns_cfg())
res = yamlize(data)
# ensure the result is a dict
if res is None:
res = {}
for field in FIELDS:
if field in res and not isinstance(res[field], dict):
raise RuntimeError("The %s field should be a dict, not %s" % (field, res[field].__class__.__name__))
# ensure those are lists
for field in ('source/patches',
'build/entry_points', 'build/script_env',
'build/features', 'build/track_features',
'requirements/build', 'requirements/run',
'requirements/conflicts', 'test/requires',
'test/files', 'test/commands', 'test/imports'):
section, key = field.split('/')
if res.get(section) is None:
res[section] = {}
if res[section].get(key, None) is None:
res[section][key] = []
# ensure those are strings
for field in ('package/version', 'build/string', 'source/svn_rev',
'source/git_tag', 'source/git_branch', 'source/md5',
'source/git_rev', 'source/path'):
section, key = field.split('/')
if res.get(section) is None:
res[section] = {}
val = res[section].get(key, '')
if val is None:
val = ''
res[section][key] = text_type(val)
return sanitize(res)
def sanitize(meta):
"""
Sanitize the meta-data to remove aliases/handle deprecation
"""
# make a copy to avoid side-effects
meta = dict(meta)
sanitize_funs = [('source', _git_clean), ]
for section, func in sanitize_funs:
if section in meta:
meta[section] = func(meta[section])
return meta
def _git_clean(source_meta):
"""
Reduce the redundancy in git specification by removing git_tag and
git_branch.
If one is specified, copy to git_rev.
    If more than one field is used to specify it, exit
    and complain.
"""
git_rev_tags_old = ('git_branch', 'git_tag')
git_rev = 'git_rev'
git_rev_tags = (git_rev,) + git_rev_tags_old
has_rev_tags = tuple(bool(source_meta[tag]) for
tag in git_rev_tags)
if sum(has_rev_tags) > 1:
msg = "Error: mulitple git_revs:"
msg += ', '.join("{}".format(key) for key, has in
zip(git_rev_tags, has_rev_tags) if has)
sys.exit(msg)
# make a copy of the input so we have no side-effects
ret_meta = dict(source_meta)
# loop over the old versions
for key, has in zip(git_rev_tags[1:], has_rev_tags[1:]):
# update if needed
if has:
ret_meta[git_rev_tags[0]] = ret_meta[key]
# and remove
del ret_meta[key]
return ret_meta
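# Example sketch (hypothetical source section, not part of the original
# module): a recipe that only sets git_tag has it folded into git_rev,
# and the old-style keys are removed:
#
#   >>> _git_clean({'git_url': 'u', 'git_rev': '', 'git_tag': 'v1.0',
#   ...             'git_branch': ''})
#   {'git_url': 'u', 'git_rev': 'v1.0'}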
# If you update this please update the example in
# conda-docs/docs/source/build.rst
FIELDS = {
'package': ['name', 'version'],
'source': ['fn', 'url', 'md5', 'sha1', 'sha256', 'path',
'git_url', 'git_tag', 'git_branch', 'git_rev',
'hg_url', 'hg_tag',
'svn_url', 'svn_rev', 'svn_ignore_externals',
'patches'],
'build': ['number', 'string', 'entry_points', 'osx_is_app',
'features', 'track_features', 'preserve_egg_dir',
'no_link', 'binary_relocation', 'script', 'noarch_python',
'has_prefix_files', 'binary_has_prefix_files', 'script_env',
'detect_binary_files_with_prefix', 'rpaths',
'always_include_files', ],
'requirements': ['build', 'run', 'conflicts'],
'app': ['entry', 'icon', 'summary', 'type', 'cli_opts',
'own_environment'],
'test': ['requires', 'commands', 'files', 'imports'],
'about': ['home', 'license', 'summary', 'readme'],
}
def check_bad_chrs(s, field):
bad_chrs = '=!@#$%^&*:;"\'\\|<>?/ '
if field in ('package/version', 'build/string'):
bad_chrs += '-'
for c in bad_chrs:
if c in s:
sys.exit("Error: bad character '%s' in %s: %s" % (c, field, s))
def get_contents(meta_path):
'''
Get the contents of the [meta.yaml|conda.yaml] file.
If jinja is installed, then the template.render function is called
before standard conda macro processors
'''
try:
import jinja2
except ImportError:
print("There was an error importing jinja2.", file=sys.stderr)
print("Please run `conda install jinja2` to enable jinja template support", file=sys.stderr)
with open(meta_path) as fd:
return fd.read()
from conda_build.jinja_context import context_processor
path, filename = os.path.split(meta_path)
loaders = [jinja2.PackageLoader('conda_build'),
jinja2.FileSystemLoader(path)
]
env = jinja2.Environment(loader=jinja2.ChoiceLoader(loaders))
env.globals.update(ns_cfg())
env.globals.update(context_processor())
template = env.get_or_select_template(filename)
contents = template.render(environment=env)
return contents
class MetaData(object):
def __init__(self, path):
assert isdir(path)
self.path = path
self.meta_path = join(path, 'meta.yaml')
self.requirements_path = join(path, 'requirements.txt')
if not isfile(self.meta_path):
self.meta_path = join(path, 'conda.yaml')
if not isfile(self.meta_path):
sys.exit("Error: meta.yaml or conda.yaml not found in %s" % path)
self.parse_again()
def parse_again(self):
"""Redo parsing for key-value pairs that are not initialized in the
first pass.
"""
if not self.meta_path:
return
self.meta = parse(get_contents(self.meta_path))
if isfile(self.requirements_path) and not self.meta['requirements']['run']:
self.meta.setdefault('requirements', {})
run_requirements = specs_from_url(self.requirements_path)
self.meta['requirements']['run'] = run_requirements
@classmethod
def fromdict(cls, metadata):
"""
Create a MetaData object from metadata dict directly.
"""
m = super(MetaData, cls).__new__(cls)
m.path = ''
m.meta_path = ''
m.meta = sanitize(metadata)
return m
def get_section(self, section):
return self.meta.get(section, {})
def get_value(self, field, default=None):
section, key = field.split('/')
return self.get_section(section).get(key, default)
def check_fields(self):
for section, submeta in iteritems(self.meta):
if section == 'extra':
continue
if section not in FIELDS:
sys.exit("Error: unknown section: %s" % section)
for key in submeta:
if key not in FIELDS[section]:
sys.exit("Error: in section %r: unknown key %r" %
(section, key))
def name(self):
res = self.get_value('package/name')
if not res:
sys.exit('Error: package/name missing in: %r' % self.meta_path)
res = text_type(res)
if res != res.lower():
sys.exit('Error: package/name must be lowercase, got: %r' % res)
check_bad_chrs(res, 'package/name')
return res
def version(self):
res = self.get_value('package/version')
if res is None:
sys.exit("Error: package/version missing in: %r" % self.meta_path)
check_bad_chrs(res, 'package/version')
return res
def build_number(self):
return int(self.get_value('build/number', 0))
def ms_depends(self, typ='run'):
res = []
name_ver_list = [
('python', config.CONDA_PY),
('numpy', config.CONDA_NPY),
('perl', config.CONDA_PERL),
('r', config.CONDA_R),
]
for spec in self.get_value('requirements/' + typ, []):
try:
ms = MatchSpec(spec)
except AssertionError:
raise RuntimeError("Invalid package specification: %r" % spec)
if ms.name == self.name():
raise RuntimeError("Error: %s cannot depend on itself" % self.name())
for name, ver in name_ver_list:
if ms.name == name:
if (ms.strictness != 1 or
self.get_value('build/noarch_python')):
continue
str_ver = text_type(ver)
if '.' not in str_ver:
str_ver = '.'.join(str_ver)
ms = MatchSpec('%s %s*' % (name, str_ver))
for c in '=!@#$%^&*:;"\'\\|<>?/':
if c in ms.name:
sys.exit("Error: bad character '%s' in package name "
"dependency '%s'" % (c, ms.name))
parts = spec.split()
if len(parts) >= 2:
if parts[1] in {'>', '>=', '=', '==', '!=', '<', '<='}:
msg = ("Error: bad character '%s' in package version "
"dependency '%s'" % (parts[1], ms.name))
if len(parts) >= 3:
msg += "\nPerhaps you meant '%s %s%s'" % (ms.name,
parts[1], parts[2])
sys.exit(msg)
res.append(ms)
return res
def build_id(self):
ret = self.get_value('build/string')
if ret:
check_bad_chrs(ret, 'build/string')
return ret
res = []
version_re = re.compile(r'(?:==)?(\d)\.(\d)')
for name, s in (('numpy', 'np'), ('python', 'py'), ('perl', 'pl'), ('r', 'r')):
for ms in self.ms_depends():
if ms.name == name:
try:
v = ms.spec.split()[1]
except IndexError:
res.append(s)
break
if any(i in v for i in ',|>!<'):
break
if name not in ['perl', 'r']:
match = version_re.match(v)
if match:
res.append(s + match.group(1) + match.group(2))
else:
res.append(s + v.strip('*'))
break
if res:
res.append('_')
res.append('%d' % self.build_number())
return ''.join(res)
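    # Example sketch (hypothetical pinned versions, not part of the original
    # module): if ms_depends() yields 'numpy 1.8*' and 'python 2.7*' and
    # build/number is 0, the pieces collected above are
    # ['np18', 'py27', '_', '0'], i.e.
    #
    #   >>> m.build_id()                                      # doctest: +SKIP
    #   'np18py27_0'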
def dist(self):
return '%s-%s-%s' % (self.name(), self.version(), self.build_id())
def pkg_fn(self):
return "%s.tar.bz2" % self.dist()
def is_app(self):
return bool(self.get_value('app/entry'))
def app_meta(self):
d = {'type': 'app'}
if self.get_value('app/icon'):
d['icon'] = '%s.png' % md5_file(join(
self.path, self.get_value('app/icon')))
for field, key in [('app/entry', 'app_entry'),
('app/type', 'app_type'),
('app/cli_opts', 'app_cli_opts'),
('app/summary', 'summary'),
('app/own_environment', 'app_own_environment')]:
value = self.get_value(field)
if value:
d[key] = value
return d
def info_index(self):
d = dict(
name = self.name(),
version = self.version(),
build = self.build_id(),
build_number = self.build_number(),
license = self.get_value('about/license'),
platform = cc.platform,
arch = cc.arch_name,
subdir = cc.subdir,
depends = sorted(ms.spec for ms in self.ms_depends())
)
if self.get_value('build/features'):
d['features'] = ' '.join(self.get_value('build/features'))
if self.get_value('build/track_features'):
d['track_features'] = ' '.join(self.get_value('build/track_features'))
if self.get_value('build/noarch_python'):
d['platform'] = d['arch'] = None
d['subdir'] = 'noarch'
if self.is_app():
d.update(self.app_meta())
return d
def has_prefix_files(self):
ret = self.get_value('build/has_prefix_files', [])
if not isinstance(ret, list):
raise RuntimeError('build/has_prefix_files should be a list of paths')
if sys.platform == 'win32':
if any('\\' in i for i in ret):
raise RuntimeError("build/has_prefix_files paths must use / as the path delimiter on Windows")
return ret
def always_include_files(self):
return self.get_value('build/always_include_files', [])
def binary_has_prefix_files(self):
ret = self.get_value('build/binary_has_prefix_files', [])
if not isinstance(ret, list):
raise RuntimeError('build/binary_has_prefix_files should be a list of paths')
if sys.platform == 'win32':
if any('\\' in i for i in ret):
raise RuntimeError("build/binary_has_prefix_files paths must use / as the path delimiter on Windows")
return ret
def __unicode__(self):
'''
String representation of the MetaData.
'''
return text_type(self.__dict__)
def __str__(self):
if PY3:
return self.__unicode__()
else:
return self.__unicode__().encode('utf-8')
def __repr__(self):
'''
String representation of the MetaData.
'''
return self.__str__()
if __name__ == '__main__':
from pprint import pprint
from os.path import expanduser
m = MetaData(expanduser('~/conda-recipes/pycosat'))
pprint(m.info_index())
| {
"repo_name": "shastings517/conda-build",
"path": "conda_build/metadata.py",
"copies": "1",
"size": "16867",
"license": "bsd-3-clause",
"hash": -4633069554477546000,
"line_mean": 33.2825203252,
"line_max": 117,
"alpha_frac": 0.5270646825,
"autogenerated": false,
"ratio": 3.8247165532879817,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4851781235787982,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import os
import re
import time
import warnings
from collections import OrderedDict
from six import string_types
from six.moves.urllib.request import urlretrieve
from six.moves.urllib.error import HTTPError, URLError
from six.moves.urllib.parse import urljoin
import pandas as pd
import numpy as np
from .json import HttpJsonClient
from .error import DrmsQueryError, DrmsExportError, DrmsOperationNotSupported
from .utils import _pd_to_numeric_coerce, _split_arg, _extract_series_name
__all__ = ['SeriesInfo', 'ExportRequest', 'Client']
class SeriesInfo(object):
"""
DRMS series details.
Use :func:`Client.info` to create an instance.
Attributes
----------
name : string
Series name.
primekeys : list of strings
Series primekeys.
keywords : pandas.DataFrame
Details about series keywords.
links : pandas.DataFrame
Details about series links.
segments : pandas.DataFrame
Details about series segments.
note : string
Series description.
dbindex : list of strings
Series database index.
retention : int
Default retention time.
unitsize : int
Storage unit size.
archive : int
Series archive flag.
tapegroup : int
Tape group.
"""
def __init__(self, d, name=None):
self._d = d
self.name = name
self.retention = self._d.get('retention')
self.unitsize = self._d.get('unitsize')
self.archive = self._d.get('archive')
self.tapegroup = self._d.get('tapegroup')
self.note = self._d.get('note')
self.primekeys = self._d.get('primekeys')
self.dbindex = self._d.get('dbindex')
self.keywords = self._parse_keywords(d['keywords'])
self.links = self._parse_links(d['links'])
self.segments = self._parse_segments(d['segments'])
@staticmethod
def _parse_keywords(d):
keys = [
'name', 'type', 'recscope', 'defval', 'units', 'note', 'linkinfo']
res = []
for di in d:
resi = []
for k in keys:
resi.append(di.get(k))
res.append(tuple(resi))
if not res:
res = None # workaround for older pandas versions
res = pd.DataFrame(res, columns=keys)
res.index = res.pop('name')
res['is_time'] = (res.type == 'time')
res['is_integer'] = (res.type == 'short')
res['is_integer'] |= (res.type == 'int')
res['is_integer'] |= (res.type == 'longlong')
res['is_real'] = (res.type == 'float')
res['is_real'] |= (res.type == 'double')
res['is_numeric'] = (res.is_integer | res.is_real)
return res
@staticmethod
def _parse_links(d):
keys = ['name', 'target', 'kind', 'note']
res = []
for di in d:
resi = []
for k in keys:
resi.append(di.get(k))
res.append(tuple(resi))
if not res:
res = None # workaround for older pandas versions
res = pd.DataFrame(res, columns=keys)
res.index = res.pop('name')
return res
@staticmethod
def _parse_segments(d):
keys = ['name', 'type', 'units', 'protocol', 'dims', 'note']
res = []
for di in d:
resi = []
for k in keys:
resi.append(di.get(k))
res.append(tuple(resi))
if not res:
res = None # workaround for older pandas versions
res = pd.DataFrame(res, columns=keys)
res.index = res.pop('name')
return res
def __repr__(self):
if self.name is None:
return '<SeriesInfo>'
else:
return '<SeriesInfo "%s">' % self.name
class ExportRequest(object):
"""
Class for handling data export requests.
Use :func:`Client.export` or :func:`Client.export_from_id` to
create an instance.
Attributes
----------
id : string
Request ID.
status : int
Export request status.
urls : pandas.DataFrame
URLs of all downloadable files.
request_url : string
URL of the export request.
method : string
Data export method.
protocol : string
Data export protocol.
data : pandas.DataFrame
Records and filenames of the export request.
dir : string
Common directory of the requested files on the server.
tarfile : string
Filename, if a TAR file was requested.
keywords : string
Filename of textfile containing record keywords.
"""
_status_code_ok = 0
_status_code_notfound = 6
_status_codes_pending = [1, 2, _status_code_notfound]
_status_codes_ok_or_pending = [_status_code_ok] + _status_codes_pending
def __init__(self, d, client):
self._client = client
self._requestid = None
self._status = None
self._download_urls_cache = None
self._update_status(d)
@classmethod
def _create_from_id(cls, requestid, client):
d = client._json.exp_status(requestid)
return cls(d, client)
def __repr__(self):
idstr = str(None) if self._requestid is None else (
'"%s"' % self._requestid)
return '<ExportRequest id=%s, status=%d>' % (idstr, self._status)
@staticmethod
def _parse_data(d):
keys = ['record', 'filename']
res = None if d is None else [
(di.get(keys[0]), di.get(keys[1])) for di in d]
if not res:
res = None # workaround for older pandas versions
res = pd.DataFrame(res, columns=keys)
return res
def _update_status(self, d=None):
if d is None and self._requestid is not None:
d = self._client._json.exp_status(self._requestid)
self._d = d
self._d_time = time.time()
self._status = int(self._d.get('status', self._status))
self._requestid = self._d.get('requestid', self._requestid)
if self._requestid is None:
# Apparently 'reqid' is used instead of 'requestid' for certain
# protocols like 'mpg'
self._requestid = self._d.get('reqid')
if self._requestid == '':
# Use None if the requestid is empty (url_quick + as-is)
self._requestid = None
def _raise_on_error(self, notfound_ok=True):
if self._status in self._status_codes_ok_or_pending:
if self._status != self._status_code_notfound or notfound_ok:
return # request has not failed (yet)
msg = self._d.get('error')
if msg is None:
msg = 'DRMS export request failed.'
msg += ' [status=%d]' % self._status
raise DrmsExportError(msg)
def _generate_download_urls(self):
"""Generate download URLs for the current request."""
res = self.data.copy()
data_dir = self.dir
# Clear first record name for movies, as it is not a DRMS record.
if self.protocol in ['mpg', 'mp4']:
if res.record[0].startswith('movie'):
res.record[0] = None
# tar exports provide only a single TAR file with full path
if self.tarfile is not None:
data_dir = None
res = pd.DataFrame(
[(None, self.tarfile)], columns=['record', 'filename'])
# If data_dir is None, the filename column should contain the full
# path of the file and we need to extract the basename part. If
# data_dir contains a directory, the filename column should contain
# only the basename and we need to join it with the directory.
if data_dir is None:
res.rename(columns={'filename': 'fpath'}, inplace=True)
split_fpath = res.fpath.str.split('/')
res['filename'] = [sfp[-1] for sfp in split_fpath]
else:
res['fpath'] = data_dir + '/' + res.filename
if self.method.startswith('url'):
baseurl = self._client._server.http_download_baseurl
elif self.method.startswith('ftp'):
baseurl = self._client._server.ftp_download_baseurl
else:
raise RuntimeError(
'Download is not supported for export method "%s"' %
self.method)
# Generate download URLs.
urls = []
for fp in res.fpath:
while fp.startswith('/'):
fp = fp[1:]
urls.append(urljoin(baseurl, fp))
res['url'] = urls
# Remove rows with missing files.
res = res[res.filename != 'NoDataFile']
del res['fpath']
return res
@staticmethod
def _next_available_filename(fname):
"""Find next available filename, append a number if neccessary."""
i = 1
new_fname = fname
while os.path.exists(new_fname):
new_fname = '%s.%d' % (fname, i)
i += 1
return new_fname
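    # Example sketch (hypothetical filenames): if 'aia.fits' already exists
    # in the target directory, the helper above returns 'aia.fits.1', then
    # 'aia.fits.2', and so on. download() below uses it both for the final
    # filename and for the temporary '.part' file.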
@property
def id(self):
"""(string) Request ID."""
return self._requestid
@property
def status(self):
"""(int) Export request status."""
return self._status
@property
def method(self):
"""(string) Export method."""
return self._d.get('method')
@property
def protocol(self):
"""(string) Export protocol."""
return self._d.get('protocol')
@property
def dir(self):
"""(string) Common directory of the requested files on the server."""
if self.has_finished(skip_update=True):
self._raise_on_error()
else:
self.wait()
data_dir = self._d.get('dir')
return data_dir if data_dir else None
@property
def data(self):
"""
(pandas.DataFrame) Records and filenames of the export request.
Returns a pandas.DataFrame containing the records and filenames
of the export request (DataFrame columns: 'record', 'filename').
"""
if self.has_finished(skip_update=True):
self._raise_on_error()
else:
self.wait()
return self._parse_data(self._d.get('data'))
@property
def tarfile(self):
"""(string) Filename, if a TAR file was requested."""
if self.has_finished(skip_update=True):
self._raise_on_error()
else:
self.wait()
data_tarfile = self._d.get('tarfile')
return data_tarfile if data_tarfile else None
@property
def keywords(self):
"""(string) Filename of textfile containing record keywords."""
if self.has_finished(skip_update=True):
self._raise_on_error()
else:
self.wait()
data_keywords = self._d.get('keywords')
return data_keywords if data_keywords else None
@property
def request_url(self):
"""(string) URL of the export request."""
data_dir = self.dir
http_baseurl = self._client._server.http_download_baseurl
if data_dir is None or http_baseurl is None:
return None
if data_dir.startswith('/'):
data_dir = data_dir[1:]
return urljoin(http_baseurl, data_dir)
@property
def urls(self):
"""
(pandas.DataFrame) URLs of all downloadable files.
Returns a pandas.DataFrame containing the records, filenames
and URLs of the export request (DataFrame columns: 'record',
'filename' and 'url').
"""
if self._download_urls_cache is None:
self._download_urls_cache = self._generate_download_urls()
return self._download_urls_cache
def has_finished(self, skip_update=False):
"""
Check if the export request has finished.
Parameters
----------
skip_update : bool
If set to True, the export status will not be updated from
the server, even if it was in pending state after the last
status update.
Returns
-------
result : bool
True if the export request has finished or False if the
request is still pending.
"""
pending = self._status in self._status_codes_pending
if not pending:
return True
if not skip_update:
self._update_status()
pending = self._status in self._status_codes_pending
return not pending
def has_succeeded(self, skip_update=False):
"""
Check if the export request has finished successfully.
Parameters
----------
skip_update : bool
If set to True, the export status will not be updated from
the server, even if it was in pending state after the last
status update.
Returns
-------
result : bool
True if the export request has finished successfully or
False if the request failed or is still pending.
"""
if not self.has_finished(skip_update):
return False
return self._status == self._status_code_ok
def has_failed(self, skip_update=False):
"""
Check if the export request has finished unsuccessfully.
Parameters
----------
skip_update : bool
If set to True, the export status will not be updated from
the server, even if it was in pending state after the last
status update.
Returns
-------
result : bool
True if the export request has finished unsuccessfully or
False if the request has succeeded or is still pending.
"""
if not self.has_finished(skip_update):
return False
return self._status not in self._status_codes_ok_or_pending
def wait(self, timeout=None, sleep=5, retries_notfound=5, verbose=None):
"""
Wait for the server to process the export request. This method
        continuously updates the request status until the server signals
that the export request has succeeded or failed.
Parameters
----------
timeout : number or None
Maximum number of seconds until this method times out. If
set to None (the default), the status will be updated
indefinitely until the request succeeded or failed.
sleep : number or None
Time in seconds between status updates (defaults to 5
seconds). If set to None, a server supplied value is used.
retries_notfound : int
Number of retries in case the request was not found on the
server. Note that it usually takes a short time until a new
request is registered on the server, so a value too low
might cause an exception to be raised, even if the request
is valid and will eventually show up on the server.
verbose : bool or None
Set to True if status messages should be printed to stdout.
If set to None (default), the :attr:`Client.verbose` flag
of the associated client instance is used instead.
Returns
-------
result : bool
True if the request succeeded or False if a timeout
            occurred. In case of an error an exception is raised.
"""
if timeout is not None:
t_start = time.time()
timeout = float(timeout)
if sleep is not None:
sleep = float(sleep)
retries_notfound = int(retries_notfound)
if verbose is None:
verbose = self._client.verbose
# We are done, if the request has already finished.
if self.has_finished(skip_update=True):
self._raise_on_error()
return True
while True:
if verbose:
idstr = str(None) if self._requestid is None else (
'"%s"' % self._requestid)
print('Export request pending. [id=%s, status=%d]' % (
idstr, self._status))
# Use the user-provided sleep value or the server's wait value.
# In case neither is available, wait for 5 seconds.
wait_secs = self._d.get('wait', 5) if sleep is None else sleep
# Consider the time that passed since the last status update.
wait_secs -= (time.time() - self._d_time)
if wait_secs < 0:
wait_secs = 0
if timeout is not None:
# Return, if we would time out while sleeping.
if t_start + timeout + wait_secs - time.time() < 0:
return False
if verbose:
print('Waiting for %d seconds...' % round(wait_secs))
time.sleep(wait_secs)
if self.has_finished():
self._raise_on_error()
return True
elif self._status == self._status_code_notfound:
# Raise exception, if no retries are left.
if retries_notfound <= 0:
self._raise_on_error(notfound_ok=False)
if verbose:
print('Request not found on server, %d retries left.' %
retries_notfound)
retries_notfound -= 1
def download(self, directory, index=None, fname_from_rec=None,
verbose=None):
"""
Download data files.
By default, the server-side filenames are used as local
filenames, except for export method 'url_quick', where the
local filenames are generated from record names (see parameter
fname_from_rec). In case a file with the same name already
exists in the download directory, an ascending number is
appended to the filename.
Note: Downloading data segments that are directories, e.g. data
segments from series like "hmi.rdVflows_fd15_frame", is
currently not supported. In order to download data from series
like this, you need to use the export methods 'url-tar' or
'ftp-tar' when submitting the data export request.
Parameters
----------
directory : string
Download directory (must already exist).
index : int, list of ints or None
Index (or indices) of the file(s) to be downloaded. If set
to None (the default), all files of the export request are
downloaded. Note that this parameter is ignored for export
methods 'url-tar' and 'ftp-tar', where only a single tar
file is available for download.
fname_from_rec : bool or None
If True, local filenames are generated from record names.
If set to False, the original filenames are used. If set to
None (default), local filenames are generated only for
export method 'url_quick'. Exceptions: For exports with
methods 'url-tar' and 'ftp-tar', no filename will be
generated. This also applies to movie files from exports
with protocols 'mpg' or 'mp4', where the original filename
is used locally.
verbose : bool or None
Set to True if status messages should be printed to stdout.
If set to None (default), the :attr:`Client.verbose` flag
of the associated client instance is used instead.
Returns
-------
result : pandas.DataFrame
DataFrame containing the record string, download URL and
local location of each downloaded file (DataFrame columns:
'record', 'url' and 'download').
"""
out_dir = os.path.abspath(directory)
if not os.path.isdir(out_dir):
raise IOError('Download directory "%s" does not exist' % out_dir)
if np.isscalar(index):
index = [int(index)]
elif index is not None:
index = list(index)
if verbose is None:
verbose = self._client.verbose
# Wait until the export request has finished.
self.wait(verbose=verbose)
if fname_from_rec is None:
# For 'url_quick', generate local filenames from record strings.
if self.method == 'url_quick':
fname_from_rec = True
# self.urls contains the same records as self.data, except for the tar
# methods, where self.urls only contains one entry, the TAR file.
data = self.urls
if index is not None and self.tarfile is None:
data = data.iloc[index].copy()
ndata = len(data)
downloads = []
for i in range(ndata):
di = data.iloc[i]
if fname_from_rec:
filename = self._client._filename_from_export_record(
di.record, old_fname=di.filename)
if filename is None:
filename = di.filename
else:
filename = di.filename
fpath = os.path.join(out_dir, filename)
fpath_new = self._next_available_filename(fpath)
fpath_tmp = self._next_available_filename(fpath_new + '.part')
if verbose:
print('Downloading file %d of %d...' % (i + 1, ndata))
print(' record: %s' % di.record)
print(' filename: %s' % di.filename)
try:
urlretrieve(di.url, fpath_tmp)
except (HTTPError, URLError):
fpath_new = None
if verbose:
print(' -> Error: Could not download file')
else:
fpath_new = self._next_available_filename(fpath)
os.rename(fpath_tmp, fpath_new)
if verbose:
print(' -> "%s"' % os.path.relpath(fpath_new))
downloads.append(fpath_new)
res = data[['record', 'url']].copy()
res['download'] = downloads
return res
class Client(object):
"""
Client for remote DRMS server access.
Parameters
----------
server : string or ServerConfig
Registered server ID or ServerConfig instance.
Defaults to JSOC.
email : string or None
        Default email address used for data export requests.
verbose : bool
Print export status messages to stdout (disabled by default).
debug : bool
Print debug output (disabled by default).
Attributes
----------
email : string
Default email address used for data export requests.
verbose : bool
Enable/disable export status output.
debug : bool
Enable/disable debug output.
"""
def __init__(self, server='jsoc', email=None, verbose=False, debug=False):
self._json = HttpJsonClient(server=server, debug=debug)
self._info_cache = {}
        self.verbose = verbose  # use property for conversion to bool
self.email = email # use property for email validation
def __repr__(self):
return '<Client "%s">' % self._server.name
def _convert_numeric_keywords(self, ds, kdf, skip_conversion=None):
si = self.info(ds)
int_keys = list(si.keywords[si.keywords.is_integer].index)
num_keys = list(si.keywords[si.keywords.is_numeric].index)
num_keys += ['*recnum*', '*sunum*', '*size*']
if skip_conversion is None:
skip_conversion = []
elif isinstance(skip_conversion, string_types):
skip_conversion = [skip_conversion]
for k in kdf:
if k in skip_conversion:
continue
# pandas apparently does not support hexadecimal strings, so
# we need a special treatment for integer strings that start
# with '0x', like QUALITY. The following to_numeric call is
            # still necessary as the results are still Python objects.
if k in int_keys and kdf[k].dtype is np.dtype(object):
idx = kdf[k].str.startswith('0x')
if idx.any():
kdf.loc[idx, k] = kdf.loc[idx, k].map(
lambda x: int(x, base=16))
if k in num_keys:
kdf[k] = _pd_to_numeric_coerce(kdf[k])
@staticmethod
def _raise_query_error(d, status=None):
"""Raises a DrmsQueryError, using the json error message from d"""
if status is None:
status = d.get('status')
msg = d.get('error')
if msg is None:
msg = 'DRMS Query failed.'
msg += ' [status=%s]' % status
raise DrmsQueryError(msg)
def _generate_filenamefmt(self, sname):
"""Generate filename format string for export requests."""
try:
si = self.info(sname)
except:
# Cannot generate filename format for unknown series.
return None
pkfmt_list = []
for k in si.primekeys:
if si.keywords.loc[k].is_time:
pkfmt_list.append('{%s:A}' % k)
else:
pkfmt_list.append('{%s}' % k)
if pkfmt_list:
return '%s.%s.{segment}' % (si.name, '.'.join(pkfmt_list))
else:
return si.name + '.{recnum:%lld}.{segment}'
# Some regular expressions used to parse export request queries.
_re_export_recset = re.compile(
r'^\s*([\w\.]+)\s*(\[.*\])?\s*(?:\{([\w\s\.,]*)\})?\s*$')
_re_export_recset_pkeys = re.compile(r'\[([^\[^\]]*)\]')
_re_export_recset_slist = re.compile(r'[\s,]+')
@staticmethod
def _parse_export_recset(rs):
"""Parse export request record set."""
if rs is None:
return None, None, None
m = Client._re_export_recset.match(rs)
if not m:
return None, None, None
sname, pkeys, segs = m.groups()
if pkeys is not None:
pkeys = Client._re_export_recset_pkeys.findall(pkeys)
if segs is not None:
segs = Client._re_export_recset_slist.split(segs)
return sname, pkeys, segs
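    # Example sketch (hypothetical record set, not part of the original
    # module): the parser above splits
    #
    #   'hmi.v_45s[2016.04.01_TAI/1d@6h]{Dopplergram}'
    #
    # into sname='hmi.v_45s', pkeys=['2016.04.01_TAI/1d@6h'] and
    # segs=['Dopplergram'].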
def _filename_from_export_record(self, rs, old_fname=None):
"""Generate a filename from an export request record."""
sname, pkeys, segs = self._parse_export_recset(rs)
if sname is None:
return None
# We need to identify time primekeys and change the time strings to
# make them suitable for filenames.
try:
si = self.info(sname)
except:
# Cannot generate filename for unknown series.
return None
if pkeys is not None:
n = len(pkeys)
if n != len(si.primekeys):
# Number of parsed pkeys differs from series definition.
return None
for i in range(n):
# Cleanup time strings.
if si.keywords.loc[si.primekeys[i]].is_time:
v = pkeys[i]
v = v.replace('.', '').replace(':', '').replace('-', '')
pkeys[i] = v
# Generate filename.
fname = si.name
if pkeys is not None:
pkeys = [k for k in pkeys if k.strip()]
pkeys_str = '.'.join(pkeys)
if pkeys_str:
fname += '.' + pkeys_str
if segs is not None:
segs = [s for s in segs if s.strip()]
segs_str = '.'.join(segs)
if segs_str:
fname += '.' + segs_str
if old_fname is not None:
# Try to use the file extension of the original filename.
known_fname_extensions = [
'.fits', '.txt', '.jpg', '.mpg', '.mp4', '.tar']
for ext in known_fname_extensions:
if old_fname.endswith(ext):
return fname + ext
return fname
# Export color table names, from (internal) series "jsoc.Color_Tables"
_export_color_table_names = [
'HMI_mag.lut',
'aia_131.lut',
'aia_1600.lut',
'aia_1700.lut',
'aia_171.lut',
'aia_193.lut',
'aia_211.lut',
'aia_304.lut',
'aia_335.lut',
'aia_4500.lut',
'aia_94.lut',
'aia_mixed',
'bb.sao',
'grey.sao',
'heat.sao']
# Export scaling types, from (internal) series "jsoc.Color_Tables"
_export_scaling_names = [
'LOG',
'MINMAX',
'MINMAXGIVEN',
'SQRT',
'mag']
@staticmethod
def _validate_export_protocol_args(protocol_args):
"""
Validate export protocol arguments.
"""
if protocol_args is None:
return
ct_key = 'ct'
ct = protocol_args.get(ct_key)
if ct is None:
ct_key = 'CT'
ct = protocol_args.get(ct_key)
if ct is not None:
ll = [s.lower() for s in Client._export_color_table_names]
try:
i = ll.index(ct.lower())
except ValueError:
msg = "'%s' is not a valid color table, " % ct
msg += 'available color tables: %s' % ', '.join(
["'%s'" % s for s in Client._export_color_table_names])
raise ValueError(msg)
protocol_args[ct_key] = Client._export_color_table_names[i]
scaling = protocol_args.get('scaling')
if scaling is not None:
ll = [s.lower() for s in Client._export_scaling_names]
try:
i = ll.index(scaling.lower())
except ValueError:
msg = "'%s' is not a valid scaling type, " % scaling
msg += 'available scaling types: %s' % ', '.join(
["'%s'" % s for s in Client._export_scaling_names])
raise ValueError(msg)
protocol_args['scaling'] = Client._export_scaling_names[i]
@property
def _server(self):
"""(ServerConfig) Remote server configuration."""
return self._json.server
@property
def debug(self):
"""(bool) Enable/disable debug output."""
return self._json.debug
@debug.setter
def debug(self, value):
self._json.debug = value
@property
def email(self):
"""(string) Default email address used for data export requests."""
return self._email
@email.setter
def email(self, value):
if value is not None and not self.check_email(value):
raise ValueError('Email address is invalid or not registered')
self._email = value
@property
def verbose(self):
"""(bool) Enable/disable export status output."""
return self._verbose
@verbose.setter
def verbose(self, value):
self._verbose = bool(value)
def series(self, regex=None, full=False):
"""
List available data series.
Parameters
----------
regex : string or None
Regular expression, used to select a subset of the
available series. If set to None, a list of all available
series is returned.
full : bool
If True, return a pandas.DataFrame containing additional
series information, like description and primekeys. If
False (default), the result is a list containing only the
series names.
Returns
-------
result : list or pandas.DataFrame
List of series names or DataFrame containing name,
primekeys and a description of the selected series (see
parameter ``full``).
"""
if not self._server.check_supported('series'):
raise DrmsOperationNotSupported(
'Server does not support series list access')
if self._server.url_show_series_wrapper is None:
# No wrapper CGI available, use the regular version.
d = self._json.show_series(regex)
status = d.get('status')
if status != 0:
self._raise_query_error(d)
if full:
keys = ('name', 'primekeys', 'note')
if not d['names']:
return pd.DataFrame(columns=keys)
recs = [(it['name'], _split_arg(it['primekeys']), it['note'])
for it in d['names']]
return pd.DataFrame(recs, columns=keys)
else:
if not d['names']:
return []
return [it['name'] for it in d['names']]
else:
# Use show_series_wrapper instead of the regular version.
d = self._json.show_series_wrapper(regex, info=full)
if full:
keys = ('name', 'note')
if not d['seriesList']:
return pd.DataFrame(columns=keys)
recs = []
for it in d['seriesList']:
name, info = tuple(it.items())[0]
note = info.get('description', '')
recs.append((name, note))
return pd.DataFrame(recs, columns=keys)
else:
return d['seriesList']
def info(self, ds):
"""
Get information about the content of a data series.
Parameters
----------
ds : string
Name of the data series.
Returns
-------
result : :class:`SeriesInfo`
SeriesInfo instance containing information about the data
series.
"""
if not self._server.check_supported('info'):
raise DrmsOperationNotSupported(
'Server does not support series info access')
name = _extract_series_name(ds)
if name is not None:
name = name.lower()
if name in self._info_cache:
return self._info_cache[name]
d = self._json.series_struct(name)
status = d.get('status')
if status != 0:
self._raise_query_error(d)
si = SeriesInfo(d, name=name)
if name is not None:
self._info_cache[name] = si
return si
def keys(self, ds):
"""
Get a list of keywords that are available for a series. Use
the :func:`info` method for more details.
Parameters
----------
ds : string
Name of the data series.
Returns
-------
result : list
List of keywords available for the selected series.
"""
si = self.info(ds)
return list(si.keywords.index)
def pkeys(self, ds):
"""
Get a list of primekeys that are available for a series. Use
the :func:`info` method for more details.
Parameters
----------
ds : string
Name of the data series.
Returns
-------
result : list
List of primekeys available for the selected series.
"""
si = self.info(ds)
return list(si.primekeys)
def get(self, ds, key=None, seg=None, link=None, convert_numeric=True,
skip_conversion=None):
"""
This method is deprecated. Use :func:`query` instead.
"""
warnings.warn(
'Client.get() is deprecated, use Client.query() instead',
DeprecationWarning)
return self.query(
ds, key=key, seg=seg, link=link, convert_numeric=convert_numeric,
skip_conversion=skip_conversion)
def query(self, ds, key=None, seg=None, link=None, convert_numeric=True,
skip_conversion=None, pkeys=False, rec_index=False, n=None):
"""
Query keywords, segments and/or links of a record set. At
least one of the parameters key, seg, link or pkeys needs to
be specified.
Parameters
----------
ds : string
Record set query.
key : string, list of strings or None
List of requested keywords, optional. If set to None
(default), no keyword results will be returned, except
when pkeys is True.
seg : string, list of strings or None
List of requested segments, optional. If set to None
(default), no segment results will be returned.
link : string, list of strings or None
List of requested Links, optional. If set to None
(default), no link results will be returned.
convert_numeric : bool
Convert keywords with numeric types from string to
numbers. This may result in NaNs for invalid/missing
values. Default is True.
skip_conversion : list of strings or None
List of keywords names to be skipped when performing a
numeric conversion. Default is None.
pkeys : bool
If True, all primekeys of the series are added to the
``key`` parameter.
rec_index : bool
If True, record names are used as index for the resulting
DataFrames.
n : int or None
            Limits the number of records returned by the query. For
            positive values, the first n records of the record set are
            returned, for negative values the last abs(n) records. If
            set to None (default), no limit is applied.
Returns
-------
res_key : pandas.DataFrame, optional
Keyword query results. This DataFrame is only returned,
if key is not None or pkeys is set to True.
res_seg : pandas.DataFrame, optional
Segment query results. This DataFrame is only returned,
if seg is not None.
res_link : pandas.DataFrame, optional
Link query results. This DataFrame is only returned,
if link is not None.
"""
if not self._server.check_supported('query'):
raise DrmsOperationNotSupported(
'Server does not support DRMS queries')
if pkeys:
pk = self.pkeys(ds)
key = _split_arg(key) if key is not None else []
key = [k for k in key if k not in pk]
key = pk + key
lres = self._json.rs_list(
ds, key, seg, link, recinfo=rec_index, n=n)
status = lres.get('status')
if status != 0:
self._raise_query_error(lres)
res = []
if key is not None:
if 'keywords' in lres:
names = [it['name'] for it in lres['keywords']]
values = [it['values'] for it in lres['keywords']]
res_key = pd.DataFrame.from_dict(
OrderedDict(zip(names, values)))
else:
res_key = pd.DataFrame()
if convert_numeric:
self._convert_numeric_keywords(ds, res_key, skip_conversion)
res.append(res_key)
if seg is not None:
if 'segments' in lres:
names = [it['name'] for it in lres['segments']]
values = [it['values'] for it in lres['segments']]
res_seg = pd.DataFrame.from_dict(
OrderedDict(zip(names, values)))
else:
res_seg = pd.DataFrame()
res.append(res_seg)
if link is not None:
if 'links' in lres:
names = [it['name'] for it in lres['links']]
values = [it['values'] for it in lres['links']]
res_link = pd.DataFrame.from_dict(
OrderedDict(zip(names, values)))
else:
res_link = pd.DataFrame()
res.append(res_link)
if rec_index:
index = [it['name'] for it in lres['recinfo']]
for r in res:
r.index = index
if len(res) == 0:
return None
elif len(res) == 1:
return res[0]
else:
return tuple(res)
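    # Usage sketch (hypothetical series and keywords, not part of the
    # original module):
    #
    #   >>> c = Client()                                      # doctest: +SKIP
    #   >>> k = c.query('hmi.v_45s[2016.04.01_TAI/1d@6h]',
    #   ...             key='T_REC, CRLT_OBS')                # doctest: +SKIP
    #
    # returns a single DataFrame because only `key` was requested; asking
    # for seg and/or link as well returns a tuple of DataFrames in the
    # order key, seg, link.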
def check_email(self, email):
"""
Check if the email address is registered for data export.
You can register your email for data exports from JSOC on
the `JSOC email registration
<http://jsoc.stanford.edu/ajax/register_email.html>`__
webpage.
Parameters
----------
email : string
Email address to be checked.
Returns
-------
result : bool
True if the email address is valid and registered, False
otherwise.
"""
if not self._server.check_supported('email'):
raise DrmsOperationNotSupported(
'Server does not support user emails')
res = self._json.check_address(email)
status = res.get('status')
return status is not None and int(status) == 2
def export(self, ds, method='url_quick', protocol='as-is',
protocol_args=None, filenamefmt=None, n=None, email=None,
requestor=None):
"""
Submit a data export request.
A registered email address is required for data exports. You
can register your email address for data exports from JSOC on
the `JSOC email registration
<http://jsoc.stanford.edu/ajax/register_email.html>`__
webpage.
        An interactive web interface and additional information are
        available on the `JSOC data export
        <http://jsoc.stanford.edu/ajax/exportdata.html>`__ webpage.
        Note that export requests that were submitted using the
        web interface can be accessed using the :func:`export_from_id`
method.
Parameters
----------
ds : string
Data export record set query.
method : string
Export method. Supported methods are: 'url_quick', 'url',
'url-tar', 'ftp' and 'ftp-tar'. Default is 'url_quick'.
protocol : string
Export protocol. Supported protocols are: 'as-is', 'fits',
'jpg', 'mpg' and 'mp4'. Default is 'as-is'.
protocol_args : dict
Extra protocol arguments for protocols 'jpg', 'mpg' and
'mp4'. Valid arguments are: 'ct', 'scaling', 'min', 'max'
and 'size'. See the JSOC data export webpage for more
details.
filenamefmt : string, None or False
Custom filename format string for exported files. This is
ignored for 'url_quick'/'as-is' data exports. If set to
None (default), the format string will be generated using
the primekeys of the data series. If set to False, the
filename format string will be omitted in the export
request.
n : int or None
Limits the number of records requested. For positive
values, the first n records of the record set are returned,
for negative values the last abs(n) records. If set to None
(default), no limit is applied.
email : string or None
Registered email address. If email is None (default), the
current default email address is used, which in this case
has to be set before calling export() by using the
:attr:`Client.email` attribute.
requestor : string, None or False
Export user ID. Default is None, in which case the user
name is determined from the email address. If set to False,
the requestor argument will be omitted in the export
request.
Returns
-------
result : :class:`ExportRequest`
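        Examples
        --------
        A minimal usage sketch, assuming ``c`` is a :class:`Client`
        instance with a registered default email address; the record
        set below is a placeholder:
        >>> r = c.export('hmi.v_45s[2016.04.01_TAI/1d@6h]{Dopplergram}',
        ...              method='url', protocol='fits')  # doctest: +SKIP
        >>> r.wait()  # doctest: +SKIP
        >>> r.download('.')  # doctest: +SKIP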
"""
if not self._server.check_supported('export'):
raise DrmsOperationNotSupported(
'Server does not support export requests')
if email is None:
if self._email is None:
raise ValueError(
                'The email argument is required when no default email '
                'address has been set')
email = self._email
if filenamefmt is None:
sname = _extract_series_name(ds)
filenamefmt = self._generate_filenamefmt(sname)
elif filenamefmt is False:
filenamefmt = None
if protocol.lower() in ['jpg', 'mpg', 'mp4']:
self._validate_export_protocol_args(protocol_args)
d = self._json.exp_request(
ds, email, method=method, protocol=protocol,
protocol_args=protocol_args, filenamefmt=filenamefmt,
n=n, requestor=requestor)
return ExportRequest(d, client=self)
def export_from_id(self, requestid):
"""
Create an :class:`ExportRequest` instance from an existing
requestid.
Parameters
----------
requestid : string
Export request ID.
Returns
-------
result : :class:`ExportRequest`
"""
if not self._server.check_supported('export'):
raise DrmsOperationNotSupported(
'Server does not support export requests')
return ExportRequest._create_from_id(requestid, client=self)
def _test_info(c, ds):
sname = c.series(ds)
res = []
skiplist = [r'jsoc.*']
for sni in sname:
skipit = False
print(sni)
for spat in skiplist:
if re.match(spat, sni):
print('** skipping series **')
skipit = True
break
if not skipit:
res.append(c.info(sni))
return res
| {
"repo_name": "kbg/drms",
"path": "drms/client.py",
"copies": "1",
"size": "45619",
"license": "mit",
"hash": -6056168776419825000,
"line_mean": 34.7235708692,
"line_max": 78,
"alpha_frac": 0.5533440014,
"autogenerated": false,
"ratio": 4.381386861313868,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5434730862713868,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import os
import re
from qtpy import QtCore, QtWidgets
from qtpy.QtCore import Qt
from glue.core import parse
from glue import core
from glue.utils import nonpartial
from glue.utils.qt import load_ui
from glue.utils.qt import CompletionTextEdit
__all__ = ['CustomComponentWidget']
def disambiguate(label, labels):
""" Changes name of label if it conflicts with labels list
Parameters
----------
label : str
The label to change the name of
labels : iterable
A list of all labels
Returns
-------
label : str
If needed, appended with a suffix "_{number}". The output does not
appear in labels
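    Examples
    --------
    A small illustration with made-up labels:
    >>> disambiguate('x', ['y', 'z'])
    'x'
    >>> disambiguate('x', ['x', 'x_1'])
    'x_2'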
"""
label = label.replace(' ', '_')
if label not in labels:
return label
suffix = 1
while label + ('_%i' % suffix) in labels:
suffix += 1
return label + ('_%i' % suffix)
class ColorizedCompletionTextEdit(CompletionTextEdit):
updated = QtCore.Signal()
def insertPlainText(self, *args):
super(ColorizedCompletionTextEdit, self).insertPlainText(*args)
self.reformat_text()
self.updated.emit()
def keyReleaseEvent(self, event):
super(ColorizedCompletionTextEdit, self).keyReleaseEvent(event)
self.reformat_text()
self.updated.emit()
def reformat_text(self):
# Here every time a key is released, we re-colorize the expression.
# We show valid components in blue, and invalid ones in red. We
        # recognize components because they contain a ":", which is not valid
# Python syntax (except if one considers lambda functions, but we can
# probably ignore that here)
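        # For example, in "data1:x + data2:y" both "data1:x" and
        # "data2:y" match the pattern below and get wrapped in colored
        # <font> tags (blue for known components, red otherwise).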
text = self.toPlainText()
# If there are no : in the text we don't need to do anything
        if ":" not in text:
return
pattern = '[^\\s]*:[^\\s]*'
def format_components(m):
component = m.group(0)
if component in self.word_list:
return "<font color='#0072B2'><b>" + component + "</b></font> "
else:
return "<font color='#D55E00'><b>" + component + "</b></font> "
html = re.sub(pattern, format_components, text)
tc = self.textCursor()
pos = tc.position()
self.setHtml(html)
# Sometimes the HTML gets rid of double spaces so we have to make
# sure the position isn't greater than the text length.
text = self.toPlainText()
pos = min(pos, len(text))
tc.setPosition(pos)
self.setTextCursor(tc)
self.setAlignment(Qt.AlignCenter)
class CustomComponentWidget(QtWidgets.QDialog):
"""
Dialog to add derived components to data via parsed commands.
"""
def __init__(self, collection, parent=None):
super(CustomComponentWidget, self).__init__(parent=parent)
# Load in ui file to set up widget
self.ui = load_ui('widget.ui', self,
directory=os.path.dirname(__file__))
# In the ui file we do not create the text field for the expression
# because we want to use a custom widget that supports auto-complete.
self.ui.expression.setAlignment(Qt.AlignCenter)
self._labels = {}
self._data = {}
self._collection = collection
self._gather_components()
self._gather_data()
self._init_widgets()
self._connect()
# Set up auto-completion. While the auto-complete window is open, we
# cannot add/remove datasets or other components, so we can populate
# the auto_completer straight off.
self.ui.expression.set_word_list(list(self._labels.keys()))
self.ui.button_ok.clicked.connect(self.accept)
self.ui.button_cancel.clicked.connect(self.reject)
self.ui.expression.updated.connect(self._update_status)
self._update_status()
def _update_status(self):
if str(self.ui.expression.toPlainText()) == "":
self.ui.label_status.setText("")
self.ui.button_ok.setEnabled(False)
else:
try:
pc = self._get_parsed_command()
pc.evaluate_test()
except SyntaxError:
self.ui.label_status.setStyleSheet('color: red')
self.ui.label_status.setText("Incomplete or invalid syntax")
self.ui.button_ok.setEnabled(False)
except parse.InvalidTagError as exc:
self.ui.label_status.setStyleSheet('color: red')
self.ui.label_status.setText("Invalid component: {0}".format(exc.tag))
self.ui.button_ok.setEnabled(False)
except Exception as exc:
self.ui.label_status.setStyleSheet('color: red')
self.ui.label_status.setText(str(exc))
self.ui.button_ok.setEnabled(False)
else:
self.ui.label_status.setStyleSheet('color: green')
self.ui.label_status.setText("Valid expression")
self.ui.button_ok.setEnabled(True)
def _connect(self):
cl = self.ui.component_list
cl.itemDoubleClicked.connect(self._add_to_expression)
def _init_widgets(self):
"""
Set up default state of widget
"""
comps = self.ui.component_list
comps.addItems(sorted(self._labels.keys()))
data = self.ui.data_list
data.addItems(sorted(self._data.keys()))
def _gather_components(self):
"""
Build a mapping from unique labels -> componentIDs
"""
comps = set()
for data in self._collection:
for c in data.components:
if c in comps:
continue
label = "%s:%s" % (data.label, c)
label = disambiguate(label, self._labels)
self._labels[label] = c
comps.add(c)
def _gather_data(self):
"""
Build a mapping from unique labels -> data objects
"""
for data in self._collection:
label = data.label
label = disambiguate(label, self._data)
self._data[label] = data
def _selected_data(self):
"""
Yield all data objects that are selected in the DataList
"""
for items in self.ui.data_list.selectedItems():
yield self._data[str(items.text())]
def _create_link(self):
"""
Create a ComponentLink from the state of the GUI
Returns
-------
A new component link
"""
pc = self._get_parsed_command()
label = str(self.ui.new_label.text()) or 'new component'
new_id = core.data.ComponentID(label)
link = parse.ParsedComponentLink(new_id, pc)
return link
def _get_parsed_command(self):
expression = str(self.ui.expression.toPlainText())
# To maintain backward compatibility with previous versions of glue,
# we add curly brackets around the components in the expression.
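        # For example, "data1:x + 1" becomes "{data1:x} + 1" before it
        # is handed to parse.ParsedCommand.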
pattern = '[^\\s]*:[^\\s]*'
def add_curly(m):
return "{" + m.group(0) + "}"
expression = re.sub(pattern, add_curly, expression)
return parse.ParsedCommand(expression, self._labels)
@property
def _number_targets(self):
"""
How many targets are selected
"""
return len(self.ui.data_list.selectedItems())
def _add_link_to_targets(self, link):
"""
Add a link to all the selected data
"""
for target in self._selected_data():
target.add_component_link(link)
def _add_to_expression(self, item):
"""
Add a component list item to the expression editor
"""
addition = '%s ' % item.text()
expression = self.ui.expression
expression.insertPlainText(addition)
def accept(self):
if self._number_targets == 0:
QtWidgets.QMessageBox.critical(self.ui, "Error", "Please specify the target dataset(s)",
buttons=QtWidgets.QMessageBox.Ok)
elif len(self.ui.new_label.text()) == 0:
QtWidgets.QMessageBox.critical(self.ui, "Error", "Please specify the new component name",
buttons=QtWidgets.QMessageBox.Ok)
else:
link = self._create_link()
if link:
self._add_link_to_targets(link)
super(CustomComponentWidget, self).accept()
def main():
from glue.core.data import Data
from glue.core.data_collection import DataCollection
import numpy as np
x = np.random.random((5, 5))
y = x * 3
data = DataCollection(Data(label='test', x=x, y=y))
widget = CustomComponentWidget(data)
widget.exec_()
for d in data:
print(d.label)
for c in d.components:
print('\t%s' % c)
if __name__ == "__main__":
from glue.utils.qt import get_qapp
app = get_qapp()
main()
| {
"repo_name": "saimn/glue",
"path": "glue/dialogs/custom_component/qt/widget.py",
"copies": "3",
"size": "9093",
"license": "bsd-3-clause",
"hash": -950441107761636700,
"line_mean": 31.1307420495,
"line_max": 101,
"alpha_frac": 0.5836357638,
"autogenerated": false,
"ratio": 4.153951576062129,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6237587339862128,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import os
import requests
import logging
from functools import wraps
from subprocess import Popen, PIPE
from .utils import shell_out
from .exceptions import YARNException
logger = logging.getLogger(__name__)
def check_app_id(func):
@wraps(func)
def wrapper(*args, **kwargs):
cls = args[0]
app_id = args[1]
if app_id not in cls.apps:
raise YARNException("{0}: not a valid Application "
"Id".format(app_id))
return func(*args, **kwargs)
return wrapper
class YARNAPI(object):
def __init__(self, rm, rm_port):
self.rm = rm
self.rm_port = rm_port
self.host_port = "{0}:{1}".format(self.rm, self.rm_port)
@property
def apps(self):
url = "http://{0}/ws/v1/cluster/apps/".format(self.host_port)
logger.debug("Getting Resource Manager Info: {0}".format(url))
r = requests.get(url)
data = r.json()
logger.debug(data)
if not data['apps']:
return []
apps = [d['id'] for d in data['apps']['app']]
return apps
@check_app_id
def logs(self, app_id, shell=False):
"""
Collect logs from RM (if running)
With shell=True, collect logs from HDFS after job completion
Parameters
----------
app_id: str
A yarn application ID string
shell: bool
Shell out to yarn CLI (default False)
Returns
-------
log: dictionary
logs from each container (when possible)
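        Examples
        --------
        A hypothetical session; the resource manager host, port and
        application id below are placeholders:
        >>> y = YARNAPI('example-rm.local', 8088)  # doctest: +SKIP
        >>> logs = y.logs('application_1452274436693_0001')  # doctest: +SKIP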
"""
if shell:
cmd = ["yarn", "logs", "-applicationId", app_id]
out = shell_out(cmd)
return str(out)
host_port = "{0}:{1}".format(self.rm, self.rm_port)
url = "http://{0}/ws/v1/cluster/apps/{1}".format(host_port, app_id)
logger.debug("Getting Resource Manager Info: {0}".format(url))
r = requests.get(url)
data = r.json()
logger.debug(data)
try:
amHostHttpAddress = data['app']['amHostHttpAddress']
except KeyError:
            msg = "Local logs unavailable. State: {0}, finalStatus: {1}. " \
                  "Check logs with `yarn logs -applicationId` " \
                  "instead.".format(data['app']['state'],
                                    data['app']['finalStatus'])
raise Exception(msg)
url = "http://{0}/ws/v1/node/containers".format(amHostHttpAddress)
r = requests.get(url)
data = r.json()['containers']
if not data:
raise YARNException("No container logs available")
container = data['container']
logger.debug(container)
# container_1452274436693_0001_01_000001
def get_app_id_num(x):
return "_".join(x.split("_")[1:3])
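        # e.g. "application_1452274436693_0001" and the container id in
        # the comment above both map to "1452274436693_0001", which is
        # used below to match containers to this application.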
app_id_num = get_app_id_num(app_id)
containers = [d for d in container if get_app_id_num(d['id']) == app_id_num]
logs = {}
for c in containers:
            log = dict(nodeId=c['nodeId'])
# grab stdout
url = "{0}/stdout/?start=0".format(c['containerLogsLink'])
logger.debug("Gather stdout/stderr data from {0}: {1}".format(c['nodeId'], url))
r = requests.get(url)
log['stdout'] = r.text
# grab stderr
url = "{0}/stderr/?start=0".format(c['containerLogsLink'])
r = requests.get(url)
log['stderr'] = r.text
logs[c['id']] = log
return logs
@check_app_id
def status(self, app_id):
""" Get status of an application
Parameters
----------
app_id: str
A yarn application ID string
Returns
-------
log: dictionary
status of application
"""
host_port = "{0}:{1}".format(self.rm, self.rm_port)
url = "http://{0}/ws/v1/cluster/apps/{1}".format(host_port, app_id)
logger.debug("Getting Application Info: {0}".format(url))
r = requests.get(url)
data = r.json()
return data
@check_app_id
def kill(self, app_id):
"""
Method to kill a yarn application
Parameters
----------
app_id: str
YARN application id
Returns
-------
bool:
True if successful, False otherwise.
"""
cmd = ["yarn", "application", "-kill", app_id]
# need Popen because YARN killed message occurs on stderr
proc = Popen(cmd, stdout=PIPE, stderr=PIPE)
out, err = proc.communicate()
logger.debug(out)
logger.debug(err)
return any("Killed application" in s for s in [str(out), str(err)])
| {
"repo_name": "NielsZeilemaker/knit",
"path": "knit/yarn_api.py",
"copies": "1",
"size": "4820",
"license": "bsd-3-clause",
"hash": 1266856071328565800,
"line_mean": 27.5207100592,
"line_max": 94,
"alpha_frac": 0.5298755187,
"autogenerated": false,
"ratio": 3.9411283728536386,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9966486067380242,
"avg_score": 0.0009035648346792249,
"num_lines": 169
} |
from __future__ import absolute_import, division, print_function
import os
import shutil
from os.path import expanduser
import tarfile
import requests
from appr import SYSTEM
from appr.commands.command_base import CommandBase
from appr.utils import mkdir_p, get_current_script_path
LOCAL_DIR = os.path.dirname(__file__)
def install_helm_plugin(plugin, plugin_info):
version = plugin['name']
tarball_src = ("https://github.com/%s/releases/download/%s/helm-registry_%s.tar.gz" %
(plugin_info['repo'], version, SYSTEM))
helm_home = os.getenv("HELM_HOME", os.path.join(expanduser("~"), ".helm"))
plugin_path = os.getenv("HELM_PLUGIN_DIR", os.path.join(helm_home, "plugins"))
mkdir_p(plugin_path)
tardest = os.path.join(plugin_path, "appr-helm-plugin-%s.tar.gz" % version)
res = requests.get(tarball_src)
res.raise_for_status()
with open(tardest, "wb") as f:
f.write(res.content)
tar = tarfile.open(tardest, 'r:gz')
tar.extractall(plugin_path)
bin_path = os.path.join(plugin_path, "registry/appr")
if os.path.exists(bin_path):
os.remove(bin_path)
execscript = get_current_script_path()
if SYSTEM != "windows":
os.symlink(get_current_script_path(), bin_path)
else:
shutil.copy(get_current_script_path(), bin_path)
return {
'source': tarball_src,
'plugin-version': version,
'status': 'installed',
'platform': SYSTEM,
'path': os.path.join(plugin_path, 'registry'),
'symlink': execscript
}
class PluginsCmd(CommandBase):
name = 'plugins'
help_message = "Install plugins"
plugins = {
'helm': {
'repo': 'app-registry/appr-helm-plugin',
'install_method': install_helm_plugin
}
}
output_default = "yaml"
def __init__(self, options):
super(PluginsCmd, self).__init__(options)
self.plugin = options.plugin
self.status = ""
@classmethod
def get_latest_plugin(cls, plugin_name):
path = "https://api.github.com/repos/%s/tags" % cls.plugins[plugin_name]['repo']
resp = requests.get(path)
resp.raise_for_status()
json_resp = resp.json()
return json_resp[0]
@classmethod
def _init_args(cls, subcmd):
subcmd.add_argument("plugin", choices=['helm'], help='plugin')
@classmethod
def _install(cls, options, unknown=None):
plugin_name = options.plugin
plugin = cls.get_latest_plugin(plugin_name)
cmd = cls(options)
cmd.status = cls.plugins[plugin_name]['install_method'](plugin, cls.plugins[plugin_name])
cmd.render()
@classmethod
def _add_arguments(cls, parser):
sub = parser.add_subparsers()
install_cmd = sub.add_parser('install')
cls._init_args(install_cmd)
install_cmd.set_defaults(func=cls._install)
def _render_dict(self):
return self.status
def _render_console(self):
return self.status
| {
"repo_name": "app-registry/appr",
"path": "appr/commands/plugins.py",
"copies": "2",
"size": "3024",
"license": "apache-2.0",
"hash": -1903250345991551500,
"line_mean": 28.9405940594,
"line_max": 97,
"alpha_frac": 0.6226851852,
"autogenerated": false,
"ratio": 3.4718714121699197,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0004415776294161216,
"num_lines": 101
} |
from __future__ import (absolute_import, division, print_function)
import os
import shutil
import subprocess
import sys
import tempfile
import warnings
import glob
from distutils.errors import CompileError
from distutils.sysconfig import get_config_var
from .util import (
get_abspath, make_dirs, copy, Glob, ArbitraryDepthGlob,
glob_at_depth, import_module_from_file, pyx_is_cplus,
sha256_of_string, sha256_of_file
)
from .runners import (
CCompilerRunner,
CppCompilerRunner,
FortranCompilerRunner
)
sharedext = get_config_var('EXT_SUFFIX' if sys.version_info >= (3, 3) else 'SO')
if os.name == 'posix':
objext = '.o'
elif os.name == 'nt':
objext = '.obj'
else:
warnings.warn("Unknown os.name: {}".format(os.name))
objext = '.o'
def compile_sources(files, Runner=None, destdir=None, cwd=None, keep_dir_struct=False,
per_file_kwargs=None, **kwargs):
""" Compile source code files to object files.
Parameters
==========
files : iterable of str
Paths to source files, if ``cwd`` is given, the paths are taken as relative.
Runner: CompilerRunner subclass (optional)
Could be e.g. ``FortranCompilerRunner``. Will be inferred from filename
extensions if missing.
destdir: str
Output directory, if cwd is given, the path is taken as relative.
cwd: str
Working directory. Specify to have compiler run in other directory.
also used as root of relative paths.
keep_dir_struct: bool
Reproduce directory structure in `destdir`. default: ``False``
per_file_kwargs: dict
Dict mapping instances in ``files`` to keyword arguments.
\\*\\*kwargs: dict
Default keyword arguments to pass to ``Runner``.
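    Examples
    ========
    A hypothetical invocation; the file names, directories and flags
    are placeholders, not values defined in this module:
    >>> compile_sources(['a.c', 'b.f90'], destdir='build',
    ...                 per_file_kwargs={'a.c': {'std': 'c99'}})  # doctest: +SKIP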
"""
_per_file_kwargs = {}
if per_file_kwargs is not None:
for k, v in per_file_kwargs.items():
if isinstance(k, Glob):
for path in glob.glob(k.pathname):
_per_file_kwargs[path] = v
elif isinstance(k, ArbitraryDepthGlob):
for path in glob_at_depth(k.filename, cwd):
_per_file_kwargs[path] = v
else:
_per_file_kwargs[k] = v
# Set up destination directory
destdir = destdir or '.'
if not os.path.isdir(destdir):
if os.path.exists(destdir):
raise IOError("{} is not a directory".format(destdir))
else:
make_dirs(destdir)
if cwd is None:
cwd = '.'
for f in files:
copy(f, destdir, only_update=True, dest_is_dir=True)
# Compile files and return list of paths to the objects
dstpaths = []
for f in files:
if keep_dir_struct:
name, ext = os.path.splitext(f)
else:
name, ext = os.path.splitext(os.path.basename(f))
file_kwargs = kwargs.copy()
file_kwargs.update(_per_file_kwargs.get(f, {}))
dstpaths.append(src2obj(f, Runner, cwd=cwd, **file_kwargs))
return dstpaths
def get_mixed_fort_c_linker(vendor=None, cplus=False, cwd=None):
vendor = vendor or os.environ.get('SYMPY_COMPILER_VENDOR', 'gnu')
if vendor.lower() == 'intel':
if cplus:
return (FortranCompilerRunner,
{'flags': ['-nofor_main', '-cxxlib']}, vendor)
else:
return (FortranCompilerRunner,
{'flags': ['-nofor_main']}, vendor)
    elif vendor.lower() in ('gnu', 'llvm'):
if cplus:
return (CppCompilerRunner,
{'lib_options': ['fortran']}, vendor)
else:
return (FortranCompilerRunner,
{}, vendor)
else:
raise ValueError("No vendor found.")
def link(obj_files, out_file=None, shared=False, Runner=None,
cwd=None, cplus=False, fort=False, **kwargs):
""" Link object files.
Parameters
==========
obj_files: iterable of str
Paths to object files.
out_file: str (optional)
Path to executable/shared library, if ``None`` it will be
deduced from the last item in obj_files.
shared: bool
Generate a shared library?
Runner: CompilerRunner subclass (optional)
If not given the ``cplus`` and ``fort`` flags will be inspected
(fallback is the C compiler).
cwd: str
Path to the root of relative paths and working directory for compiler.
cplus: bool
C++ objects? default: ``False``.
fort: bool
Fortran objects? default: ``False``.
\\*\\*kwargs: dict
Keyword arguments passed to ``Runner``.
Returns
=======
The absolute path to the generated shared object / executable.
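    Examples
    ========
    A hypothetical call building a shared library from two object
    files (paths are placeholders):
    >>> link(['foo.o', 'bar.o'], out_file='foobar.so', shared=True)  # doctest: +SKIP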
"""
if out_file is None:
out_file, ext = os.path.splitext(os.path.basename(obj_files[-1]))
if shared:
out_file += sharedext
if not Runner:
if fort:
Runner, extra_kwargs, vendor = \
get_mixed_fort_c_linker(
vendor=kwargs.get('vendor', None),
cplus=cplus,
cwd=cwd,
)
for k, v in extra_kwargs.items():
if k in kwargs:
kwargs[k].expand(v)
else:
kwargs[k] = v
else:
if cplus:
Runner = CppCompilerRunner
else:
Runner = CCompilerRunner
flags = kwargs.pop('flags', [])
if shared:
if '-shared' not in flags:
flags.append('-shared')
run_linker = kwargs.pop('run_linker', True)
if not run_linker:
raise ValueError("run_linker was set to False (nonsensical).")
out_file = get_abspath(out_file, cwd=cwd)
runner = Runner(obj_files, out_file, flags, cwd=cwd, **kwargs)
runner.run()
return out_file
def link_py_so(obj_files, so_file=None, cwd=None, libraries=None,
cplus=False, fort=False, **kwargs):
""" Link python extension module (shared object) for importing
Parameters
==========
obj_files: iterable of str
Paths to object files to be linked.
so_file: str
Name (path) of shared object file to create. If not specified it will
        have the basename of the last object file in `obj_files` but with the
extension '.so' (Unix).
cwd: path string
Root of relative paths and working directory of linker.
libraries: iterable of strings
Libraries to link against, e.g. ['m'].
cplus: bool
Any C++ objects? default: ``False``.
fort: bool
Any Fortran objects? default: ``False``.
    \\*\\*kwargs: dict
Keyword arguments passed to ``link(...)``.
Returns
=======
    Absolute path to the generated shared object.
"""
libraries = libraries or []
include_dirs = kwargs.pop('include_dirs', [])
library_dirs = kwargs.pop('library_dirs', [])
# from distutils/command/build_ext.py:
if sys.platform == "win32":
warnings.warn("Windows not yet supported.")
elif sys.platform == 'darwin':
# Don't use the default code below
pass
elif sys.platform[:3] == 'aix':
# Don't use the default code below
pass
else:
from distutils import sysconfig
if sysconfig.get_config_var('Py_ENABLE_SHARED'):
ABIFLAGS = sysconfig.get_config_var('ABIFLAGS')
pythonlib = 'python{}.{}{}'.format(
sys.hexversion >> 24, (sys.hexversion >> 16) & 0xff,
ABIFLAGS or '')
libraries += [pythonlib]
else:
pass
flags = kwargs.pop('flags', [])
needed_flags = ('-pthread',)
for flag in needed_flags:
if flag not in flags:
flags.append(flag)
return link(obj_files, shared=True, flags=flags, cwd=cwd,
cplus=cplus, fort=fort, include_dirs=include_dirs,
libraries=libraries, library_dirs=library_dirs, **kwargs)
def simple_cythonize(src, destdir=None, cwd=None, **cy_kwargs):
""" Generates a C file from a Cython source file.
Parameters
==========
src: str
Path to Cython source.
destdir: str (optional)
Path to output directory (default: '.').
cwd: path string (optional)
Root of relative paths (default: '.').
**cy_kwargs:
Second argument passed to cy_compile. Generates a .cpp file if ``cplus=True`` in ``cy_kwargs``,
else a .c file.
"""
from Cython.Compiler.Main import (
default_options, CompilationOptions
)
from Cython.Compiler.Main import compile as cy_compile
assert src.lower().endswith('.pyx') or src.lower().endswith('.py')
cwd = cwd or '.'
destdir = destdir or '.'
ext = '.cpp' if cy_kwargs.get('cplus', False) else '.c'
c_name = os.path.splitext(os.path.basename(src))[0] + ext
dstfile = os.path.join(destdir, c_name)
if cwd:
ori_dir = os.getcwd()
else:
ori_dir = '.'
os.chdir(cwd)
try:
cy_options = CompilationOptions(default_options)
cy_options.__dict__.update(cy_kwargs)
cy_result = cy_compile([src], cy_options)
if cy_result.num_errors > 0:
raise ValueError("Cython compilation failed.")
if os.path.abspath(os.path.dirname(src)) != os.path.abspath(destdir):
if os.path.exists(dstfile):
os.unlink(dstfile)
shutil.move(os.path.join(os.path.dirname(src), c_name), destdir)
finally:
os.chdir(ori_dir)
return dstfile
extension_mapping = {
'.c': (CCompilerRunner, None),
'.cpp': (CppCompilerRunner, None),
'.cxx': (CppCompilerRunner, None),
'.f': (FortranCompilerRunner, None),
'.for': (FortranCompilerRunner, None),
'.ftn': (FortranCompilerRunner, None),
'.f90': (FortranCompilerRunner, None), # ifort only knows about .f90
'.f95': (FortranCompilerRunner, 'f95'),
'.f03': (FortranCompilerRunner, 'f2003'),
'.f08': (FortranCompilerRunner, 'f2008'),
}
def src2obj(srcpath, Runner=None, objpath=None, cwd=None, inc_py=False, **kwargs):
""" Compiles a source code file to an object file.
Files ending with '.pyx' assumed to be cython files and
are dispatched to pyx2obj.
Parameters
==========
srcpath: str
Path to source file.
Runner: CompilerRunner subclass (optional)
If ``None``: deduced from extension of srcpath.
objpath : str (optional)
Path to generated object. If ``None``: deduced from ``srcpath``.
cwd: str (optional)
Working directory and root of relative paths. If ``None``: current dir.
inc_py: bool
Add Python include path to kwarg "include_dirs". Default: False
\\*\\*kwargs: dict
keyword arguments passed to Runner or pyx2obj
"""
name, ext = os.path.splitext(os.path.basename(srcpath))
if objpath is None:
if os.path.isabs(srcpath):
objpath = '.'
else:
objpath = os.path.dirname(srcpath)
objpath = objpath or '.' # avoid objpath == ''
if os.path.isdir(objpath):
objpath = os.path.join(objpath, name+objext)
include_dirs = kwargs.pop('include_dirs', [])
if inc_py:
from distutils.sysconfig import get_python_inc
py_inc_dir = get_python_inc()
if py_inc_dir not in include_dirs:
include_dirs.append(py_inc_dir)
if ext.lower() == '.pyx':
return pyx2obj(srcpath, objpath=objpath, include_dirs=include_dirs, cwd=cwd,
**kwargs)
if Runner is None:
Runner, std = extension_mapping[ext.lower()]
if 'std' not in kwargs:
kwargs['std'] = std
flags = kwargs.pop('flags', [])
needed_flags = ('-fPIC',)
for flag in needed_flags:
if flag not in flags:
flags.append(flag)
# src2obj implies not running the linker...
run_linker = kwargs.pop('run_linker', False)
if run_linker:
raise CompileError("src2obj called with run_linker=True")
runner = Runner([srcpath], objpath, include_dirs=include_dirs,
run_linker=run_linker, cwd=cwd, flags=flags, **kwargs)
runner.run()
return objpath
def pyx2obj(pyxpath, objpath=None, destdir=None, cwd=None,
include_dirs=None, cy_kwargs=None, cplus=None, **kwargs):
"""
Convenience function
If cwd is specified, pyxpath and dst are taken to be relative
If only_update is set to `True` the modification time is checked
and compilation is only run if the source is newer than the
destination
Parameters
==========
pyxpath: str
Path to Cython source file.
objpath: str (optional)
Path to object file to generate.
destdir: str (optional)
Directory to put generated C file. When ``None``: directory of ``objpath``.
cwd: str (optional)
Working directory and root of relative paths.
include_dirs: iterable of path strings (optional)
Passed onto src2obj and via cy_kwargs['include_path']
to simple_cythonize.
cy_kwargs: dict (optional)
Keyword arguments passed onto `simple_cythonize`
cplus: bool (optional)
Indicate whether C++ is used. default: auto-detect using ``.util.pyx_is_cplus``.
compile_kwargs: dict
keyword arguments passed onto src2obj
Returns
=======
Absolute path of generated object file.
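    Examples
    ========
    A hypothetical call; the .pyx path and build directory are
    placeholders:
    >>> pyx2obj('_mymod.pyx', objpath='./build')  # doctest: +SKIP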
"""
assert pyxpath.endswith('.pyx')
cwd = cwd or '.'
objpath = objpath or '.'
destdir = destdir or os.path.dirname(objpath)
abs_objpath = get_abspath(objpath, cwd=cwd)
if os.path.isdir(abs_objpath):
pyx_fname = os.path.basename(pyxpath)
name, ext = os.path.splitext(pyx_fname)
objpath = os.path.join(objpath, name+objext)
cy_kwargs = cy_kwargs or {}
cy_kwargs['output_dir'] = cwd
if cplus is None:
cplus = pyx_is_cplus(pyxpath)
cy_kwargs['cplus'] = cplus
interm_c_file = simple_cythonize(pyxpath, destdir=destdir, cwd=cwd, **cy_kwargs)
include_dirs = include_dirs or []
flags = kwargs.pop('flags', [])
needed_flags = ('-fwrapv', '-pthread', '-fPIC')
for flag in needed_flags:
if flag not in flags:
flags.append(flag)
options = kwargs.pop('options', [])
if kwargs.pop('strict_aliasing', False):
raise CompileError("Cython requires strict aliasing to be disabled.")
# Let's be explicit about standard
if cplus:
std = kwargs.pop('std', 'c++98')
else:
std = kwargs.pop('std', 'c99')
return src2obj(interm_c_file, objpath=objpath, cwd=cwd,
include_dirs=include_dirs, flags=flags, std=std,
options=options, inc_py=True, strict_aliasing=False,
**kwargs)
def _any_X(srcs, cls):
for src in srcs:
name, ext = os.path.splitext(src)
key = ext.lower()
if key in extension_mapping:
if extension_mapping[key][0] == cls:
return True
return False
def any_fortran_src(srcs):
return _any_X(srcs, FortranCompilerRunner)
def any_cplus_src(srcs):
return _any_X(srcs, CppCompilerRunner)
def compile_link_import_py_ext(sources, extname=None, build_dir='.', compile_kwargs=None,
link_kwargs=None):
""" Compiles sources to a shared object (python extension) and imports it
    The extension module is built from ``sources`` and then imported. If the
    shared object is newer than the sources, it is not rebuilt but simply
    imported.
Parameters
==========
sources : string
List of paths to sources.
extname : string
Name of extension (default: ``None``).
If ``None``: taken from the last file in ``sources`` without extension.
build_dir: str
Path to directory in which objects files etc. are generated.
compile_kwargs: dict
keyword arguments passed to ``compile_sources``
link_kwargs: dict
keyword arguments passed to ``link_py_so``
Returns
=======
    The imported module of the python extension.
Examples
========
>>> mod = compile_link_import_py_ext(['fft.f90', 'conv.cpp', '_fft.pyx']) # doctest: +SKIP
>>> Aprim = mod.fft(A) # doctest: +SKIP
"""
if extname is None:
extname = os.path.splitext(os.path.basename(sources[-1]))[0]
compile_kwargs = compile_kwargs or {}
link_kwargs = link_kwargs or {}
try:
mod = import_module_from_file(os.path.join(build_dir, extname), sources)
except ImportError:
objs = compile_sources(list(map(get_abspath, sources)), destdir=build_dir,
cwd=build_dir, **compile_kwargs)
so = link_py_so(objs, cwd=build_dir, fort=any_fortran_src(sources),
cplus=any_cplus_src(sources), **link_kwargs)
mod = import_module_from_file(so)
return mod
def _write_sources_to_build_dir(sources, build_dir):
build_dir = build_dir or tempfile.mkdtemp()
if not os.path.isdir(build_dir):
raise OSError("Non-existent directory: ", build_dir)
source_files = []
for name, src in sources:
dest = os.path.join(build_dir, name)
differs = True
sha256_in_mem = sha256_of_string(src.encode('utf-8')).hexdigest()
if os.path.exists(dest):
if os.path.exists(dest+'.sha256'):
sha256_on_disk = open(dest+'.sha256', 'rt').read()
else:
sha256_on_disk = sha256_of_file(dest).hexdigest()
differs = sha256_on_disk != sha256_in_mem
if differs:
with open(dest, 'wt') as fh:
fh.write(src)
open(dest+'.sha256', 'wt').write(sha256_in_mem)
source_files.append(dest)
return source_files, build_dir
def compile_link_import_strings(sources, build_dir=None, **kwargs):
""" Compiles, links and imports extension module from source.
Parameters
==========
sources : iterable of name/source pair tuples
build_dir : string (default: None)
Path. ``None`` implies use a temporary directory.
**kwargs:
Keyword arguments passed onto `compile_link_import_py_ext`.
Returns
=======
mod : module
The compiled and imported extension module.
info : dict
Containing ``build_dir`` as 'build_dir'.
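    Examples
    ========
    A minimal sketch of the name/source pair format; the Cython
    snippet below is a placeholder:
    >>> sources = [('_square.pyx', 'def square(x): return x*x')]  # doctest: +SKIP
    >>> mod, info = compile_link_import_strings(sources)  # doctest: +SKIP
    >>> mod.square(3)  # doctest: +SKIP
    9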
"""
source_files, build_dir = _write_sources_to_build_dir(sources, build_dir)
mod = compile_link_import_py_ext(source_files, build_dir=build_dir, **kwargs)
info = dict(build_dir=build_dir)
return mod, info
def compile_run_strings(sources, build_dir=None, clean=False, compile_kwargs=None, link_kwargs=None):
""" Compiles, links and runs a program built from sources.
Parameters
==========
sources : iterable of name/source pair tuples
build_dir : string (default: None)
Path. ``None`` implies use a temporary directory.
clean : bool
Whether to remove build_dir after use. This will only have an
effect if ``build_dir`` is ``None`` (which creates a temporary directory).
Passing ``clean == True`` and ``build_dir != None`` raises a ``ValueError``.
This will also set ``build_dir`` in returned info dictionary to ``None``.
compile_kwargs: dict
Keyword arguments passed onto ``compile_sources``
link_kwargs: dict
Keyword arguments passed onto ``link``
Returns
=======
(stdout, stderr): pair of strings
info: dict
Containing exit status as 'exit_status' and ``build_dir`` as 'build_dir'
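    Examples
    ========
    A hypothetical single-file C program; the file name and source
    text are placeholders:
    >>> sources = [('main.c', '#include <stdio.h>\\nint main(){return 0;}')]  # doctest: +SKIP
    >>> (stdout, stderr), info = compile_run_strings(sources, clean=True)  # doctest: +SKIP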
"""
if clean and build_dir is not None:
raise ValueError("Automatic removal of build_dir is only available for temporary directory.")
try:
source_files, build_dir = _write_sources_to_build_dir(sources, build_dir)
objs = compile_sources(list(map(get_abspath, source_files)), destdir=build_dir,
cwd=build_dir, **(compile_kwargs or {}))
prog = link(objs, cwd=build_dir,
fort=any_fortran_src(source_files),
cplus=any_cplus_src(source_files), **(link_kwargs or {}))
p = subprocess.Popen([prog], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
exit_status = p.wait()
stdout, stderr = [txt.decode('utf-8') for txt in p.communicate()]
finally:
if clean and os.path.isdir(build_dir):
shutil.rmtree(build_dir)
build_dir = None
info = dict(exit_status=exit_status, build_dir=build_dir)
return (stdout, stderr), info
| {
"repo_name": "kaushik94/sympy",
"path": "sympy/utilities/_compilation/compilation.py",
"copies": "1",
"size": "20653",
"license": "bsd-3-clause",
"hash": 2992897030267556400,
"line_mean": 31.6787974684,
"line_max": 103,
"alpha_frac": 0.59967075,
"autogenerated": false,
"ratio": 3.8303041543026706,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4929974904302671,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import os
import signal
import subprocess
from contextlib import contextmanager
try:
from urllib import urlopen
from urlparse import urlparse
except ImportError:
from urllib.request import urlopen
from urllib.parse import urlparse
# Scientific stack.
import numpy as np
import numpy.ma as ma
from pandas import read_csv
from netCDF4 import Dataset, date2index, num2date
import lxml.html
rootpath = os.path.split(__file__)[0]
df = read_csv(os.path.join(rootpath, 'data', 'climatology_data_sources.csv'))
style = os.path.join(rootpath, 'data', 'style.css')
__all__ = ['rot2d',
'shrink',
'get_roms',
'css_styles',
'to_html',
'make_map',
'embed_html',
'get_coordinates',
'parse_url',
'url_lister',
'time_limit',
'TimeoutException',
'make_qr',
'nbviewer_link',
'save_html']
# ROMS.
def rot2d(x, y, ang):
"""
Rotate vectors by geometric angle.
Examples
--------
>>> import numpy as np
>>> x, y = rot2d(1, 0, np.deg2rad(90))
>>> np.allclose([0, 1], [x, y])
True
"""
xr = x * np.cos(ang) - y * np.sin(ang)
yr = x * np.sin(ang) + y * np.cos(ang)
return xr, yr
def shrink(a, b):
"""Return array shrunk to fit a specified shape by trimming or averaging.
a = shrink(array, shape)
array is an numpy ndarray, and shape is a tuple (e.g., from
array.shape). `a` is the input array shrunk such that its maximum
dimensions are given by shape. If shape has more dimensions than
array, the last dimensions of shape are fit.
as, bs = shrink(a, b)
If the second argument is also an array, both a and b are shrunk to
the dimensions of each other. The input arrays must have the same
number of dimensions, and the resulting arrays will have the same
shape.
Examples
--------
>>> import numpy as np
>>> rand = np.random.rand
>>> shrink(rand(10, 10), (5, 9, 18)).shape
(9, 10)
>>> list(map(np.shape, shrink(rand(10, 10, 10), rand(5, 9, 18))))
[(5, 9, 10), (5, 9, 10)]
"""
if isinstance(b, np.ndarray):
        if len(a.shape) != len(b.shape):
            raise Exception('Input arrays must have the same number of '
                            'dimensions')
a = shrink(a, b.shape)
b = shrink(b, a.shape)
return (a, b)
if isinstance(b, int):
b = (b,)
if len(a.shape) == 1: # 1D array is a special case
dim = b[-1]
while a.shape[0] > dim: # Only shrink a.
            if (a.shape[0] - dim) >= 2:  # Trim off edges evenly.
a = a[1:-1]
else: # Or average adjacent cells.
a = 0.5*(a[1:] + a[:-1])
else:
for dim_idx in range(-(len(a.shape)), 0):
dim = b[dim_idx]
a = a.swapaxes(0, dim_idx) # Put working dim first
while a.shape[0] > dim: # Only shrink a
if (a.shape[0] - dim) >= 2: # trim off edges evenly
a = a[1:-1, :]
if (a.shape[0] - dim) == 1: # Or average adjacent cells.
a = 0.5*(a[1:, :] + a[:-1, :])
a = a.swapaxes(0, dim_idx) # Swap working dim back.
return a
def get_roms(url, time_slice, n=3):
url = parse_url(url)
with Dataset(url) as nc:
ncv = nc.variables
time = ncv['ocean_time']
tidx = date2index(time_slice, time, select='nearest')
time = num2date(time[tidx], time.units, time.calendar)
mask = ncv['mask_rho'][:]
lon_rho = ncv['lon_rho'][:]
lat_rho = ncv['lat_rho'][:]
anglev = ncv['angle'][:]
u = ncv['u'][tidx, -1, ...]
v = ncv['v'][tidx, -1, ...]
u = shrink(u, mask[1:-1, 1:-1].shape)
v = shrink(v, mask[1:-1, 1:-1].shape)
u, v = rot2d(u, v, anglev[1:-1, 1:-1])
lon = lon_rho[1:-1, 1:-1]
lat = lat_rho[1:-1, 1:-1]
u, v = u[::n, ::n], v[::n, ::n]
lon, lat = lon[::n, ::n], lat[::n, ::n]
u = ma.masked_invalid(u)
v = ma.masked_invalid(v)
return dict(lon=lon, lat=lat, u=u, v=v, time=time)
# IPython display.
def css_styles(css=style):
"""
Load css styles.
Examples
--------
>>> from IPython.display import HTML
>>> html = css_styles()
>>> isinstance(html, HTML)
True
"""
from IPython.display import HTML
with open(css) as f:
styles = f.read()
return HTML('<style>{}</style>'.format(styles))
def to_html(df, css=style):
"""
Return a pandas table HTML representation with the datagrid css.
Examples
--------
>>> from IPython.display import HTML
>>> from pandas import DataFrame
>>> df = DataFrame(np.empty((5, 5)))
>>> html = to_html(df)
>>> isinstance(html, HTML)
True
"""
from IPython.display import HTML
with open(css, 'r') as f:
style = """<style>{}</style>""".format(f.read())
table = dict(style=style, table=df.to_html())
return HTML('{style}<div class="datagrid">{table}</div>'.format(**table))
def save_html(fname, HTML):
with open(fname, 'w') as f:
f.writelines(HTML.data)
# Mapping
def make_map(bbox, **kw):
"""
Creates a folium map instance for SECOORA.
Examples
--------
>>> from folium import Map
>>> bbox = [-87.40, 24.25, -74.70, 36.70]
>>> m = make_map(bbox)
>>> isinstance(m, Map)
True
"""
import folium
line = kw.pop('line', True)
layers = kw.pop('layers', True)
hf_radar = kw.pop('hf_radar', True)
zoom_start = kw.pop('zoom_start', 5)
lon, lat = np.array(bbox).reshape(2, 2).mean(axis=0)
m = folium.Map(width='100%', height='100%',
location=[lat, lon], zoom_start=zoom_start)
if hf_radar:
url = 'http://hfrnet.ucsd.edu/thredds/wms/HFRNet/USEGC/6km/hourly/RTV'
w = folium.WmsTileLayer(url,
name='HF Radar',
format='image/png',
layers='surface_sea_water_velocity',
attr='HFRNet',
overlay=True,
transparent=True)
w.add_to(m)
if layers:
add = 'MapServer/tile/{z}/{y}/{x}'
base = 'http://services.arcgisonline.com/arcgis/rest/services'
ESRI = dict(Imagery='World_Imagery/MapServer',
Ocean_Base='Ocean/World_Ocean_Base',
Topo_Map='World_Topo_Map/MapServer',
Street_Map='World_Street_Map/MapServer',
Physical_Map='World_Physical_Map/MapServer',
Terrain_Base='World_Terrain_Base/MapServer',
NatGeo_World_Map='NatGeo_World_Map/MapServer',
Shaded_Relief='World_Shaded_Relief/MapServer',
Ocean_Reference='Ocean/World_Ocean_Reference',
Navigation_Charts='Specialty/World_Navigation_Charts')
for name, url in ESRI.items():
url = '{}/{}/{}'.format(base, url, add)
w = folium.TileLayer(tiles=url,
name=name,
attr='ESRI',
overlay=True)
w.add_to(m)
if line: # Create the map and add the bounding box line.
p = folium.PolyLine(get_coordinates(bbox),
color='#FF0000',
weight=2,
opacity=0.9,
latlon=True)
m.add_children(p)
m.add_children(folium.LayerControl())
return m
def embed_html(path="mapa.html", width=750, height=500):
    """
    Avoid in-lining the source HTML into the notebook by adding just a link.
    CAVEAT: All links must be relative!
    Examples
    --------
    >>> from IPython.display import HTML
    >>> html = embed_html(path="./mapa.html")
    >>> isinstance(html, HTML)
    True
    """
    from IPython.display import HTML
html = ('<iframe src="files/{path}" '
'style="width: {width}px; height: {height}px;'
'border: none"></iframe>').format
return HTML(html(path=path, width=width, height=height))
def get_coordinates(bbox):
"""
Create bounding box coordinates for the map. It takes flat or
nested list/numpy.array and returns 5 points that closes square
around the borders.
Examples
--------
>>> bbox = [-87.40, 24.25, -74.70, 36.70]
>>> len(get_coordinates(bbox))
5
"""
bbox = np.asanyarray(bbox).ravel()
if bbox.size == 4:
bbox = bbox.reshape(2, 2)
coordinates = []
coordinates.append([bbox[0][1], bbox[0][0]])
coordinates.append([bbox[0][1], bbox[1][0]])
coordinates.append([bbox[1][1], bbox[1][0]])
coordinates.append([bbox[1][1], bbox[0][0]])
coordinates.append([bbox[0][1], bbox[0][0]])
else:
        raise ValueError('Wrong number of corners.'
                         ' Expected 4, got {}'.format(bbox.size))
return coordinates
# Web-parsing.
def parse_url(url):
"""
This will preserve any given scheme but will add http if none is
provided.
Examples
--------
>>> parse_url('www.google.com')
'http://www.google.com'
>>> parse_url('https://www.google.com')
'https://www.google.com'
"""
if not urlparse(url).scheme:
url = "http://{}".format(url)
return url
def url_lister(url):
"""
Extract all href links from a given URL.
"""
urls = []
connection = urlopen(url)
dom = lxml.html.fromstring(connection.read())
for link in dom.xpath('//a/@href'):
urls.append(link)
return urls
def nbviewer_link(notebook):
"""
Return a nbviewer link for a given notebook in the current
repository.
"""
# User and repository names.
out = subprocess.Popen(['git', 'remote', 'show', 'origin', '-n'],
stdout=subprocess.PIPE).stdout.read().decode()
out = out.split('\n')
out = [l.strip().split(':')[-1] for l in out if
l.strip().startswith('Fetch')]
user, repo = out[0].split('/')
repo = repo.split('.git')[0]
# Branch name.
out = subprocess.Popen(['git', 'branch'],
stdout=subprocess.PIPE).stdout.read().decode()
out = out.split('\n')
branch = [l.split()[-1] for l in out if l.strip().startswith('*')][0]
# Path
path = os.path.abspath(notebook)
path = ''.join(path.split(repo, 1)[-1])
# URL.
params = dict(user=user,
repo=repo,
branch=branch,
path=path)
url = ('http://nbviewer.ipython.org/github/'
'{user}/{repo}/blob/{branch}{path}').format
return url(**params)
# Misc.
@contextmanager
def time_limit(seconds=10):
"""
Raise a TimeoutException after n `seconds`.
"""
def signal_handler(signum, frame):
raise TimeoutException("Timed out!")
signal.signal(signal.SIGALRM, signal_handler)
signal.alarm(seconds)
try:
yield
finally:
signal.alarm(0)
class TimeoutException(Exception):
"""
Timeout Exception.
Example
-------
>>> def long_function_call():
... import time
... sec = 0
... while True:
... sec += 1
... time.sleep(1)
>>> try:
... with time_limit(3):
... long_function_call()
... except TimeoutException as msg:
... print('{!r}'.format(msg))
TimeoutException('Timed out!',)
"""
pass
def make_qr(text):
import qrcode
qr = qrcode.QRCode(version=1,
error_correction=qrcode.constants.ERROR_CORRECT_L,
box_size=10, border=4)
qr.add_data(text)
qr.make(fit=True)
return qr.make_image()
if __name__ == '__main__':
import doctest
doctest.testmod()
| {
"repo_name": "ocefpaf/utilities",
"path": "utilities/pytools.py",
"copies": "2",
"size": "12082",
"license": "mit",
"hash": 9135481297576960000,
"line_mean": 27.0976744186,
"line_max": 78,
"alpha_frac": 0.5312862109,
"autogenerated": false,
"ratio": 3.5091489979668893,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.504043520886689,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import os
import socket
import subprocess
import sys
import time
class StripeMock(object):
PATH_SPEC = (
os.path.dirname(os.path.realpath(__file__)) + "/openapi/spec3.json"
)
PATH_FIXTURES = (
os.path.dirname(os.path.realpath(__file__)) + "/openapi/fixtures3.json"
)
_port = -1
_process = None
@classmethod
def start(cls):
if not os.path.isfile(cls.PATH_SPEC):
return False
if cls._process is not None:
print("stripe-mock already running on port %s" % cls._port)
return True
cls._port = cls.find_available_port()
print("Starting stripe-mock on port %s..." % cls._port)
cls._process = subprocess.Popen(
[
"stripe-mock",
"-http-port",
str(cls._port),
"-spec",
cls.PATH_SPEC,
"-fixtures",
cls.PATH_FIXTURES,
],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
time.sleep(1)
if cls._process.poll() is None:
print("Started stripe-mock, PID = %d" % cls._process.pid)
else:
print("stripe-mock terminated early: %d" % cls._process.returncode)
sys.exit(1)
return True
@classmethod
def stop(cls):
if cls._process is None:
return
print("Stopping stripe-mock...")
cls._process.terminate()
cls._process.wait()
cls._process = None
cls._port = -1
print("Stopped stripe-mock")
@classmethod
def port(cls):
return cls._port
@staticmethod
def find_available_port():
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind(("localhost", 0))
s.listen(1)
port = s.getsockname()[1]
s.close()
return port
| {
"repo_name": "stripe/stripe-python",
"path": "tests/stripe_mock.py",
"copies": "1",
"size": "1976",
"license": "mit",
"hash": -3566392560669174000,
"line_mean": 23.3950617284,
"line_max": 79,
"alpha_frac": 0.5278340081,
"autogenerated": false,
"ratio": 4.016260162601626,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 81
} |
from __future__ import absolute_import, division, print_function
import os
import stat
import sys
from os.path import isfile, join, expanduser
from conda_build.conda_interface import root_dir
from glob2 import glob
def find_executable(executable, prefix=None, all_matches=False):
# dir_paths is referenced as a module-level variable
# in other code
global dir_paths
result = None
if sys.platform == 'win32':
dir_paths = [join(root_dir, 'Scripts'),
join(root_dir, 'Library\\mingw-w64\\bin'),
join(root_dir, 'Library\\usr\\bin'),
join(root_dir, 'Library\\bin'), ]
if prefix:
dir_paths[0:0] = [join(prefix, 'Scripts'),
join(prefix, 'Library\\mingw-w64\\bin'),
join(prefix, 'Library\\usr\\bin'),
join(prefix, 'Library\\bin'), ]
else:
dir_paths = [join(root_dir, 'bin'), ]
if prefix:
dir_paths.insert(0, join(prefix, 'bin'))
dir_paths.extend(os.environ['PATH'].split(os.pathsep))
if sys.platform == 'win32':
exts = ('.exe', '.bat', '')
else:
exts = ('',)
all_matches_found = []
for dir_path in dir_paths:
for ext in exts:
path = expanduser(join(dir_path, executable + ext))
if isfile(path):
st = os.stat(path)
if sys.platform == 'win32' or st.st_mode & stat.S_IEXEC:
if all_matches:
all_matches_found.append(path)
else:
result = path
break
if not result and any([f in executable for f in ('*', '?', '.')]):
matches = glob(os.path.join(dir_path, executable))
if matches:
if all_matches:
all_matches_found.extend(matches)
else:
result = matches[0]
break
if result:
break
return result or all_matches_found
def find_preferably_prefixed_executable(executable, build_prefix=None, all_matches=False):
found = find_executable('*' + executable, build_prefix, all_matches)
if not found:
# It is possible to force non-prefixed exes by passing os.sep as the
# first character in executable. basename makes this work.
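    # For example, find_preferably_prefixed_executable('/gcc', prefix)
    # first looks for '*/gcc' and, when nothing matches, falls back to
    # a plain 'gcc' lookup.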
found = find_executable(os.path.basename(executable), build_prefix)
return found
| {
"repo_name": "pelson/conda-build",
"path": "conda_build/os_utils/external.py",
"copies": "3",
"size": "2520",
"license": "bsd-3-clause",
"hash": 3053520997627999700,
"line_mean": 35.5217391304,
"line_max": 90,
"alpha_frac": 0.5384920635,
"autogenerated": false,
"ratio": 4.1042345276872965,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00015926102882624622,
"num_lines": 69
} |
from __future__ import absolute_import, division, print_function
import os
import subprocess
import imp
from setuptools import setup, find_packages
# Version info
MAJOR = 0
MINOR = 4
MICRO = 0
ISRELEASED = False
VERSION = '%d.%d.%d' % (MAJOR, MINOR, MICRO)
CLASSIFIERS = ["Development Status :: 3 - Alpha",
"Environment :: Console",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: BSD License",
"Operating System :: OS Independent",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Topic :: Scientific/Engineering"]
# Description should be a one-liner:
DESCRIPTION = "schemapi: generate Python APIs from JSONSchema specifications"
# Long description will go up on the pypi page
LONG_DESCRIPTION = """
JSONSchema API Generator
========================
The schemapi_ package provides tools for auto-generation of Python
APIs from JSONSchema_ specifications.
See more information in the README_.
.. _README: https://github.com/altair-viz/schemapi/blob/master/README.md
.. _JSONSchema: http://json-schema.org/
.. _schemapi: https://github.com/altair-viz/schemapi
"""
###############################################################################
# The following versioning code is adapted from the scipy package (BSD license)
# http://github.com/scipy/scipy
# (C) 2003-2017 SciPy Developers
def git_version():
"""Return the git revision as a string"""
def _minimal_ext_cmd(cmd):
# construct minimal environment
env = {}
for k in ['SYSTEMROOT', 'PATH']:
v = os.environ.get(k)
if v is not None:
env[k] = v
# LANGUAGE is used on win32
env['LANGUAGE'] = 'C'
env['LANG'] = 'C'
env['LC_ALL'] = 'C'
out = subprocess.Popen(cmd, stdout=subprocess.PIPE, env=env).communicate()[0]
return out
try:
out = _minimal_ext_cmd(['git', 'rev-parse', 'HEAD'])
GIT_REVISION = out.strip().decode('ascii')
except OSError:
GIT_REVISION = "Unknown"
return GIT_REVISION
def get_version_info():
"""
Adding the git rev number needs to be done inside
write_version_py(), otherwise the import of version messes
up the build under Python 3.
"""
FULLVERSION = VERSION
if os.path.exists('.git'):
GIT_REVISION = git_version()
elif os.path.exists('schemapi/version.py'):
# must be a source distribution, use existing version file
# load it as a separate module to not load schemapi/__init__.py
import imp
version = imp.load_source('schemapi.version',
'schemapi/version.py')
GIT_REVISION = version.git_revision
else:
GIT_REVISION = "Unknown"
if not ISRELEASED:
FULLVERSION += '.dev0+' + GIT_REVISION[:7]
return FULLVERSION, GIT_REVISION
def write_version_py(filename='schemapi/version.py'):
cnt = '''
# This file is automatically generated by the setup.py script
long_description = """{long_description}"""
short_version = '{version}'
version = '{version}'
full_version = '{full_version}'
git_revision = '{git_revision}'
release = {isrelease}
if not release:
version = full_version
'''
FULLVERSION, GIT_REVISION = get_version_info()
with open(filename, 'w') as f:
f.write(cnt.format(long_description=LONG_DESCRIPTION,
version=VERSION,
full_version=FULLVERSION,
git_revision=GIT_REVISION,
isrelease=str(ISRELEASED)))
# End of code adapted from SciPy
###############################################################################
def setup_package():
write_version_py()
metadata = dict(
name="schemapi",
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
maintainer="Jake VanderPlas",
maintainer_email="jakevdp@gmail.com",
url="http://github.com/altair-viz/schemapi",
include_package_data=True,
license="BSD",
author="Jake VanderPlas",
author_email="jakevdp@gmail.com",
platforms="OS Independent",
package_data={'schemapi': [os.path.join('schemas', 'json', '*.json')]},
install_requires=["jsonschema"],
python_requires='>3.6',
tests_require=["pytest"],
)
metadata['version'] = get_version_info()[0]
metadata['packages'] = find_packages()
setup(**metadata)
if __name__ == '__main__':
setup_package()
| {
"repo_name": "altair-viz/schemapi",
"path": "setup.py",
"copies": "2",
"size": "4629",
"license": "bsd-3-clause",
"hash": 7542510371094770000,
"line_mean": 30.4897959184,
"line_max": 85,
"alpha_frac": 0.5893281486,
"autogenerated": false,
"ratio": 3.9905172413793104,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5579845389979311,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import os
import subprocess
import sys
import yaml
from appr.client import ApprClient
from appr.pack import ApprPackage
from appr.utils import mkdir_p, parse_package_name, parse_version_req
DEFAULT_CHARTS = "appr_charts"
class Helm(object):
def __init__(self):
pass
def download_appr_deps(self, deps, dest=DEFAULT_CHARTS, tarball=False, requests_verify=True):
"""
Creates a directory 'dep_charts' to download and extract all dependencies
fetched from the app-registry server.
returns a helm dependency list
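        A sketch of the expected ``deps`` entries; the registry host,
        namespace, package and version below are placeholders:
            deps = [{'name': 'quay.io/myns/mychart',
                     'version': '>=0.1.0'}]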
"""
mkdir_p(dest)
helm_deps = []
for dep in deps:
package_parts = parse_package_name(dep['name'])
name = package_parts['package']
vparts = parse_version_req(dep['version'])
client = ApprClient(package_parts['host'], requests_verify=requests_verify)
package_name = '%s/%s' % (package_parts['namespace'], name)
pullpack = client.pull_json(package_name, version_parts=vparts, media_type='helm')
package = ApprPackage(pullpack['blob'], b64_encoded=True)
release = pullpack['release']
packagepath = os.path.join(dest, package_parts['namespace'])
print('Pulled package: %s (%s) \n -> %s' % (dep['name'], release, packagepath),
file=sys.stderr)
if tarball:
with open('%s-%s.tgz' % (name, release), 'wb') as tarfile:
tarfile.write(package.blob)
package.extract(packagepath)
dep.update({
'name': name,
'version': release,
'repository': 'file://%s' % os.path.join(packagepath, package_parts['package'])
})
helm_deps.append(dep)
return helm_deps
def build_dep(self, dest=DEFAULT_CHARTS, overwrite=False):
"""
Reads the dependencies from the requirements.yaml, downloads the packages and updates
the requirements.yaml.
Returns status
"""
if not os.path.isfile('requirements.yaml'):
return 'No requirements.yaml found'
with open('requirements.yaml', 'rb') as requirefile:
deps = yaml.safe_load(requirefile.read())
helm_deps = {}
if 'appr' in deps and deps['appr']:
if 'dependencies' not in deps:
deps['dependencies'] = []
helm_deps = self.download_appr_deps(deps['appr'], dest)
updated_deps = [dep for dep in deps['dependencies']]
updated_deps.extend(helm_deps)
deps['dependencies'] = updated_deps
requirement_output = yaml.safe_dump(deps, default_flow_style=False)
if overwrite:
with open('requirements.yaml', 'wb') as requirefile:
requirefile.write(requirement_output)
return 'Updated requirements.yaml'
else:
return requirement_output
else:
return "No appr-registries dependencies"
def action(self, cmd, package_path, helm_opts=None):
cmd = [cmd]
if helm_opts:
cmd = cmd + helm_opts
cmd.append(package_path)
return self._call(cmd)
def _call(self, cmd):
command = ['helm'] + cmd
return subprocess.check_output(command, stderr=subprocess.STDOUT)
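# Usage sketch (not executed here; the chart path and options are illustrative):
#     helm = Helm()
#     print(helm.build_dep(dest=DEFAULT_CHARTS, overwrite=True))  # fetch appr deps into ./appr_charts
#     helm.action('install', './mychart')                         # shells out to `helm install ./mychart`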
| {
"repo_name": "app-registry/appr",
"path": "appr/plugins/helm.py",
"copies": "2",
"size": "3480",
"license": "apache-2.0",
"hash": 6821989980123252000,
"line_mean": 37.2417582418,
"line_max": 97,
"alpha_frac": 0.5804597701,
"autogenerated": false,
"ratio": 4.128113879003559,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5708573649103559,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import os
import subprocess
import uuid
import mmap
from contextlib import closing
from functools import partial
from distutils.spawn import find_executable
import sqlalchemy as sa
from sqlalchemy.ext.compiler import compiles
from sqlalchemy.sql.elements import Executable, ClauseElement
from toolz import merge
from multipledispatch import MDNotImplementedError
from ..append import append
from ..convert import convert
from .csv import CSV, infer_header
from ..temp import Temp
from .aws import S3
from .sql import fullname, getbind
class CopyFromCSV(Executable, ClauseElement):
def __init__(self, element, csv, delimiter=',', header=None, na_value='',
lineterminator='\n', quotechar='"', escapechar='\\',
encoding='utf8', skiprows=0, bind=None, **kwargs):
if not isinstance(element, sa.Table):
raise TypeError('element must be a sqlalchemy.Table instance')
self.element = element
self.csv = csv
self.delimiter = delimiter
self.header = (
header if header is not None else
(csv.has_header
if csv.has_header is not None else infer_header(csv.path))
)
self.na_value = na_value
self.lineterminator = lineterminator
self.quotechar = quotechar
self.escapechar = escapechar
self.encoding = encoding
self.skiprows = int(skiprows or self.header)
self._bind = getbind(element, bind)
for k, v in kwargs.items():
setattr(self, k, v)
@property
def bind(self):
return self._bind
@compiles(CopyFromCSV, 'sqlite')
def compile_from_csv_sqlite(element, compiler, **kwargs):
if not find_executable('sqlite3'):
raise MDNotImplementedError("Could not find sqlite executable")
t = element.element
if not element.header:
csv = element.csv
else:
csv = Temp(CSV)('.%s' % uuid.uuid1())
assert csv.has_header, \
'SQLAlchemy element.header is True but CSV inferred no header'
# write to a temporary file after skipping the first line
chunksize = 1 << 24 # 16 MiB
lineterminator = element.lineterminator.encode(element.encoding)
with open(element.csv.path, 'rb') as f:
with closing(mmap.mmap(f.fileno(), 0, access=mmap.ACCESS_READ)) as mf:
index = mf.find(lineterminator)
if index == -1:
raise ValueError("'%s' not found" % lineterminator)
mf.seek(index + len(lineterminator)) # len because \r\n
with open(csv.path, 'wb') as g:
for chunk in iter(partial(mf.read, chunksize), b''):
g.write(chunk)
fullpath = os.path.abspath(csv.path).encode('unicode-escape').decode()
cmd = ['sqlite3',
'-nullvalue', repr(element.na_value),
'-separator', element.delimiter,
'-cmd', '.import "%s" %s' % (fullpath, t.name),
element.bind.url.database]
stdout, stderr = subprocess.Popen(cmd,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
stdin=subprocess.PIPE).communicate()
assert not stdout, \
'error: %s from command: %s' % (stdout, ' '.join(cmd))
return ''
@compiles(CopyFromCSV, 'mysql')
def compile_from_csv_mysql(element, compiler, **kwargs):
if element.na_value:
raise ValueError('MySQL does not support custom NULL values')
encoding = {'utf-8': 'utf8'}.get(element.encoding.lower(),
element.encoding or 'utf8')
escapechar = element.escapechar.encode('unicode-escape').decode()
lineterminator = element.lineterminator.encode('unicode-escape').decode()
result = r"""
LOAD DATA {local} INFILE '{path}'
INTO TABLE {0.element.name}
CHARACTER SET {encoding}
FIELDS
TERMINATED BY '{0.delimiter}'
ENCLOSED BY '{0.quotechar}'
ESCAPED BY '{escapechar}'
        LINES TERMINATED BY '{lineterminator}'
IGNORE {0.skiprows} LINES;
""".format(element,
path=os.path.abspath(element.csv.path),
local=getattr(element, 'local', ''),
encoding=encoding,
lineterminator=lineterminator,
escapechar=escapechar).strip()
return result
@compiles(CopyFromCSV, 'postgresql')
def compile_from_csv_postgres(element, compiler, **kwargs):
encoding = {'utf8': 'utf-8'}.get(element.encoding.lower(),
element.encoding or 'utf8')
if len(element.escapechar) != 1:
raise ValueError('postgres does not allow escapechar longer than 1 '
'byte')
statement = """
COPY {fullname} FROM '{path}'
(FORMAT CSV,
DELIMITER E'{0.delimiter}',
NULL '{0.na_value}',
QUOTE '{0.quotechar}',
ESCAPE '{0.escapechar}',
HEADER {header},
ENCODING '{encoding}')"""
return statement.format(element,
fullname=fullname(element.element, compiler),
path=os.path.abspath(element.csv.path),
header=str(element.header).upper(),
encoding=encoding).strip()
try:
import boto
from odo.backends.aws import S3
from redshift_sqlalchemy.dialect import CopyCommand
except ImportError:
pass
else:
@compiles(CopyFromCSV, 'redshift')
def compile_from_csv_redshift(element, compiler, **kwargs):
assert isinstance(element.csv, S3(CSV))
assert element.csv.path.startswith('s3://')
cfg = boto.Config()
aws_access_key_id = cfg.get('Credentials', 'aws_access_key_id')
aws_secret_access_key = cfg.get('Credentials', 'aws_secret_access_key')
compression = getattr(element, 'compression', '').upper() or None
cmd = CopyCommand(table=element.element,
data_location=element.csv.path,
access_key_id=aws_access_key_id,
secret_access_key=aws_secret_access_key,
format='CSV',
delimiter=element.delimiter,
ignore_header=int(element.header),
empty_as_null=True,
blanks_as_null=False,
compression=compression)
return compiler.process(cmd)
@append.register(sa.Table, CSV)
def append_csv_to_sql_table(tbl, csv, bind=None, **kwargs):
bind = getbind(tbl, bind)
dialect = bind.dialect.name
# move things to a temporary S3 bucket if we're using redshift and we
# aren't already in S3
if dialect == 'redshift' and not isinstance(csv, S3(CSV)):
csv = convert(Temp(S3(CSV)), csv, **kwargs)
elif dialect != 'redshift' and isinstance(csv, S3(CSV)):
csv = convert(Temp(CSV), csv, has_header=csv.has_header, **kwargs)
elif dialect == 'hive':
from .ssh import SSH
return append(tbl, convert(Temp(SSH(CSV)), csv, **kwargs), **kwargs)
kwargs = merge(csv.dialect, kwargs)
stmt = CopyFromCSV(tbl, csv, bind=bind, **kwargs)
with bind.begin() as conn:
conn.execute(stmt)
return tbl
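# Usage sketch (engine URL, table layout and file name are illustrative, not part of this module):
#     engine = sa.create_engine('postgresql://localhost/testdb')
#     events = sa.Table('events', sa.MetaData(), sa.Column('id', sa.Integer), sa.Column('name', sa.Text))
#     append_csv_to_sql_table(events, CSV('events.csv'), bind=engine)  # bulk-loads via the dialect's COPY path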
| {
"repo_name": "cowlicks/odo",
"path": "odo/backends/sql_csv.py",
"copies": "1",
"size": "7473",
"license": "bsd-3-clause",
"hash": 6891637471659927000,
"line_mean": 36.7424242424,
"line_max": 82,
"alpha_frac": 0.5903920781,
"autogenerated": false,
"ratio": 4.149361465852304,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5239753543952305,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import os
import subprocess
import weakref
import numpy as np
from pymor.algorithms.timestepping import ExplicitEulerTimeStepper
from pymor.core.interfaces import ImmutableInterface
from pymor.discretizations.basic import InstationaryDiscretization
from pymor.operators.basic import OperatorBase
from pymor.parameters.spaces import CubicParameterSpace
from pymor.tools import mpi
from pymor.vectorarrays.interfaces import VectorSpace
from pymor.vectorarrays.list import CopyOnWriteVector, ListVectorArray
from pymor.vectorarrays.numpy import NumpyVectorSpace
import libdune_burgers as dune_module
class DuneVector(CopyOnWriteVector):
def __init__(self, impl):
self._impl = impl
@classmethod
def from_instance(cls, instance):
return cls(instance._impl)
@classmethod
def make_zeros(cls, subtype):
assert isinstance(subtype, int)
impl = dune_module.Vector(subtype, 0.)
return cls(impl)
@property
def subtype(self):
return self._impl.dim
@property
def dim(self):
return self._impl.dim
@property
def data(self):
return np.frombuffer(self._impl.buffer())
def _copy_data(self):
self._impl = self._impl.copy()
def _scal(self, alpha):
self._impl.scal(alpha)
def _axpy(self, alpha, x):
self._impl.axpy(alpha, x._impl)
def dot(self, other):
return self._impl.dot(other._impl)
def l1_norm(self):
return self._impl.l1Norm()
def l2_norm(self):
return self._impl.l2Norm()
def sup_norm(self):
return self._impl.supNorm()
def components(self, component_indices):
impl = self._impl
return np.array([impl[int(i)] for i in component_indices])
def amax(self):
ind = self._impl.amaxInd()
return ind, abs(self._impl[ind])
def __getstate__(self):
return (self.dim, self.data)
def __setstate__(self, state):
dim, data = state
self._impl = dune_module.Vector(dim, 0.)
self.data[:] = data
class DuneSpaceOperator(OperatorBase):
linear = False
def __init__(self, d):
self._impl = impl = d.go
self._d = weakref.ref(d)
self.source = self.range = VectorSpace(ListVectorArray, (DuneVector, impl.dimSource))
self.name = 'DuneBurgersSpaceOperator'
self.build_parameter_type({'exponent': tuple()}, local_global=True)
def apply(self, U, ind=None, mu=None):
assert U in self.source
mu = self.parse_parameter(mu)
exponent = float(mu['exponent'])
vectors = U._list if ind is None else [U._list[i] for i in ind]
R = self.range.zeros(len(vectors))
for r, u in zip(R._list, vectors):
self._impl.apply(u._impl, r._impl, exponent, 1.)
return R
def restricted(self, components):
if isinstance(components, np.ndarray):
components = components.tolist()
dims = mpi.comm.allgather(self.source.dim)
offsets = np.cumsum(np.concatenate(([0], dims)))[:-1]
offset = offsets[mpi.rank]
diameter, local_centers, local_source_dofs = self._d().getSubgridData(components, offset)
local_centers = np.array(local_centers, dtype=np.float64)
local_source_dofs = np.array(local_source_dofs, dtype=np.int64)
centers = np.empty((mpi.size,) + local_centers.shape, dtype=np.float64)
source_dofs = np.empty((mpi.size,) + local_source_dofs.shape, dtype=np.int64) if mpi.rank0 else None
mpi.comm.Gather(local_centers, centers, root=0)
mpi.comm.Gather(local_source_dofs, source_dofs, root=0)
if mpi.rank0:
centers = np.sum(centers, axis=0)
source_dofs = np.sum(source_dofs, axis=0)
op, source_dofs, range_dofs = self._d().makeRestrictedSpaceOperator(diameter, centers.tolist(),
source_dofs.tolist())
source_dofs = np.array(source_dofs)
range_dofs = np.array(range_dofs)
return (RestrictedDuneSpaceOperator(op, range_dofs), source_dofs)
class RestrictedDuneSpaceOperator(OperatorBase):
linear = False
def __init__(self, impl, range_dofs):
self._impl = impl
self._range_dofs = range_dofs
self.source = NumpyVectorSpace(impl.dimSource)
self.range = NumpyVectorSpace(len(range_dofs))
self.name = 'DuneBurgersSpaceOperator_restricted'
self._source_vec = dune_module.Vector(impl.dimSource, 0.)
self._range_vec = dune_module.Vector(impl.dimRange, 0.)
self._source_array = np.frombuffer(self._source_vec.buffer())
self._range_array = np.frombuffer(self._range_vec.buffer())
self.build_parameter_type({'exponent': tuple()}, local_global=True)
def apply(self, U, ind=None, mu=None):
assert U in self.source
mu = self.parse_parameter(mu)
exponent = float(mu['exponent'])
U = U.data if ind is None else \
U.data[ind] if hasattr(ind, '__len__') else \
U.data[ind:ind + 1]
R = self.range.zeros(len(U))
R_array = R.data
for i, u in enumerate(U):
self._source_array[:] = u
self._range_array[:] = 0
self._impl.apply(self._source_vec, self._range_vec, exponent, 1.)
R_array[i] = self._range_array[:][self._range_dofs]
return R
class DuneBurgersVisualizer(ImmutableInterface):
def __init__(self, impl):
self._impl = impl
def visualize(self, U, d, filename=None):
assert isinstance(U, ListVectorArray) \
or (isinstance(U, tuple) and all(isinstance(u, ListVectorArray) for u in U)
and all(len(u) == len(U[0]) for u in U))
U = (U,) if isinstance(U, ListVectorArray) else U
assert filename is None or len(U) == 1
sources = []
for i, u in enumerate(U):
fn = filename or 'dune-burgers-visualization-{}'.format(i)
sources.append(fn)
if len(u) == 1:
self._impl.visualize(u._list[0]._impl, fn)
else:
for i, uu in enumerate(u._list):
self._impl.visualize(uu._impl, '{}-{:05}'.format(fn, i))
if filename is None:
from pymor.tools import mpi
if mpi.rank0:
subprocess.call(['paraview', 's{:0>4}-dune-burgers-visualization-0-..pvtu'.format(mpi.size)])
import glob
for fn in glob.glob('s*-dune-burgers-visualization-*'):
os.remove(fn)
def discretize_dune_burgers(filename, exponent_range=(1., 2.), cache_region=None):
impl = dune_module.Discretization(filename)
operator = DuneSpaceOperator(impl)
initial_data = operator.source.zeros()
impl.initialProjection(initial_data._list[0]._impl)
nt = impl.nt
T = impl.T
time_stepper = ExplicitEulerTimeStepper(nt)
parameter_space = CubicParameterSpace({'exponent': tuple()}, *exponent_range)
visualizer = DuneBurgersVisualizer(impl)
d = InstationaryDiscretization(T, initial_data, operator, time_stepper=time_stepper,
parameter_space=parameter_space,
visualizer=visualizer, name='DuneBurgers',
cache_region=cache_region)
# d.generate_sid()
return d
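# Usage sketch (the grid/config file name is assumed; requires the compiled libdune_burgers module and MPI):
#     d = discretize_dune_burgers('burgers.cfg', exponent_range=(1., 2.))
#     U = d.solve({'exponent': 1.5})   # explicit Euler time-stepping
#     d.visualize(U)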
| {
"repo_name": "pymor/dune-burgers-demo",
"path": "dune-burgers/pymor-wrapper/dune_burgers.py",
"copies": "1",
"size": "7529",
"license": "bsd-2-clause",
"hash": -3313495520445588500,
"line_mean": 32.1674008811,
"line_max": 109,
"alpha_frac": 0.6093770753,
"autogenerated": false,
"ratio": 3.614498319731157,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9721562318214141,
"avg_score": 0.00046261536340314887,
"num_lines": 227
} |
from __future__ import absolute_import, division, print_function
import os
import sys
from os.path import isfile, join, expanduser
import conda.config as cc
from conda_build.config import config
def find_executable(executable):
# dir_paths is referenced as a module-level variable
# in other code
global dir_paths
if sys.platform == 'win32':
dir_paths = [join(config.build_prefix, 'Scripts'),
join(cc.root_dir, 'Scripts'),
'C:\\cygwin\\bin']
else:
dir_paths = [join(config.build_prefix, 'bin'),
join(cc.root_dir, 'bin'),]
dir_paths.extend(os.environ['PATH'].split(os.pathsep))
for dir_path in dir_paths:
if sys.platform == 'win32':
for ext in '.exe', '.bat', '':
path = join(dir_path, executable + ext)
if isfile(path):
return path
else:
path = join(dir_path, executable)
if isfile(expanduser(path)):
return expanduser(path)
return None
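# Usage sketch: look up a build tool on PATH or in the build/root prefixes.
#     git = find_executable('git')
#     if git is None:
#         print('git not found')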
| {
"repo_name": "shastings517/conda-build",
"path": "conda_build/external.py",
"copies": "5",
"size": "1075",
"license": "bsd-3-clause",
"hash": 6477200613203948000,
"line_mean": 30.6176470588,
"line_max": 64,
"alpha_frac": 0.568372093,
"autogenerated": false,
"ratio": 3.9667896678966788,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.7035161760896679,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import os
import sys
from os.path import join, isdir, isfile, abspath, expanduser
from shutil import copytree, copy2
from subprocess import check_call, Popen, PIPE, CalledProcessError
import locale
from conda.fetch import download
from conda.utils import hashsum_file
from conda_build import external
from conda_build.config import config
from conda_build.utils import rm_rf, tar_xf, unzip, safe_print_unicode
SRC_CACHE = join(config.croot, 'src_cache')
GIT_CACHE = join(config.croot, 'git_cache')
HG_CACHE = join(config.croot, 'hg_cache')
SVN_CACHE = join(config.croot, 'svn_cache')
WORK_DIR = join(config.croot, 'work')
def get_dir():
if not isdir(WORK_DIR):
os.makedirs(WORK_DIR)
lst = [fn for fn in os.listdir(WORK_DIR) if not fn.startswith('.')]
if len(lst) == 1:
dir_path = join(WORK_DIR, lst[0])
if isdir(dir_path):
return dir_path
return WORK_DIR
def download_to_cache(meta):
''' Download a source to the local cache. '''
print('Source cache directory is: %s' % SRC_CACHE)
if not isdir(SRC_CACHE):
os.makedirs(SRC_CACHE)
fn = meta['fn']
path = join(SRC_CACHE, fn)
if isfile(path):
print('Found source in cache: %s' % fn)
else:
print('Downloading source to cache: %s' % fn)
if not isinstance(meta['url'], list):
meta['url'] = [meta['url']]
for url in meta['url']:
try:
print("Downloading %s" % url)
download(url, path)
except RuntimeError as e:
print("Error: %s" % str(e).strip(), file=sys.stderr)
else:
print("Success")
break
else: # no break
raise RuntimeError("Could not download %s" % fn)
for tp in 'md5', 'sha1', 'sha256':
if meta.get(tp) and hashsum_file(path, tp) != meta[tp]:
raise RuntimeError("%s mismatch: '%s' != '%s'" %
(tp.upper(), hashsum_file(path, tp), meta[tp]))
return path
def unpack(meta):
''' Uncompress a downloaded source. '''
src_path = download_to_cache(meta)
os.makedirs(WORK_DIR)
print("Extracting download")
if src_path.lower().endswith(('.tar.gz', '.tar.bz2', '.tgz', '.tar.xz',
'.tar', 'tar.z')):
tar_xf(src_path, WORK_DIR)
elif src_path.lower().endswith('.zip'):
unzip(src_path, WORK_DIR)
else:
# In this case, the build script will need to deal with unpacking the source
print("Warning: Unrecognized source format. Source file will be copied to the SRC_DIR")
copy2(src_path, WORK_DIR)
def git_source(meta, recipe_dir):
''' Download a source from Git repo. '''
if not isdir(GIT_CACHE):
os.makedirs(GIT_CACHE)
git = external.find_executable('git')
if not git:
sys.exit("Error: git is not installed")
git_url = meta['git_url']
git_depth = int(meta.get('git_depth', -1))
if git_url.startswith('.'):
# It's a relative path from the conda recipe
os.chdir(recipe_dir)
git_dn = abspath(expanduser(git_url))
git_dn = "_".join(git_dn.split(os.path.sep)[1:])
else:
git_dn = git_url.split(':')[-1].replace('/', '_')
cache_repo = cache_repo_arg = join(GIT_CACHE, git_dn)
if sys.platform == 'win32':
is_cygwin = 'cygwin' in git.lower()
cache_repo_arg = cache_repo_arg.replace('\\', '/')
if is_cygwin:
cache_repo_arg = '/cygdrive/c/' + cache_repo_arg[3:]
# update (or create) the cache repo
if isdir(cache_repo):
check_call([git, 'fetch'], cwd=cache_repo)
else:
args = [git, 'clone', '--mirror']
if git_depth > 0:
args += ['--depth', git_depth]
check_call(args + [git_url, cache_repo_arg], cwd=recipe_dir)
assert isdir(cache_repo)
# now clone into the work directory
checkout = meta.get('git_rev')
# if rev is not specified, and the git_url is local,
# assume the user wants the current HEAD
if not checkout and git_url.startswith('.'):
process = Popen(["git", "rev-parse", "HEAD"],
stdout=PIPE, stderr=PIPE,
cwd=git_url)
output = process.communicate()[0].strip()
checkout = output.decode('utf-8')
if checkout:
print('checkout: %r' % checkout)
check_call([git, 'clone', '--recursive', cache_repo_arg, WORK_DIR])
if checkout:
check_call([git, 'checkout', checkout], cwd=WORK_DIR)
git_info()
return WORK_DIR
def git_info(fo=None):
''' Print info about a Git repo. '''
assert isdir(WORK_DIR)
    # Explicitly set GIT_DIR, as some Linux machines will not
    # execute properly without it.
env = os.environ.copy()
env['GIT_DIR'] = join(WORK_DIR, '.git')
env = {str(key): str(value) for key, value in env.items()}
for cmd, check_error in [
('git log -n1', True),
('git describe --tags --dirty', False),
('git status', True)]:
p = Popen(cmd.split(), stdout=PIPE, stderr=PIPE, cwd=WORK_DIR, env=env)
stdout, stderr = p.communicate()
encoding = locale.getpreferredencoding()
if not fo:
encoding = sys.stdout.encoding
encoding = encoding or 'utf-8'
stdout = stdout.decode(encoding, 'ignore')
stderr = stderr.decode(encoding, 'ignore')
if check_error and stderr and stderr.strip():
raise Exception("git error: %s" % stderr)
if fo:
fo.write(u'==> %s <==\n' % cmd)
fo.write(stdout + u'\n')
else:
print(u'==> %s <==\n' % cmd)
safe_print_unicode(stdout + u'\n')
def hg_source(meta):
''' Download a source from Mercurial repo. '''
hg = external.find_executable('hg')
if not hg:
sys.exit('Error: hg not installed')
hg_url = meta['hg_url']
if not isdir(HG_CACHE):
os.makedirs(HG_CACHE)
hg_dn = hg_url.split(':')[-1].replace('/', '_')
cache_repo = join(HG_CACHE, hg_dn)
if isdir(cache_repo):
check_call([hg, 'pull'], cwd=cache_repo)
else:
check_call([hg, 'clone', hg_url, cache_repo])
assert isdir(cache_repo)
    # now clone into the work directory
update = meta.get('hg_tag') or 'tip'
print('checkout: %r' % update)
check_call([hg, 'clone', cache_repo, WORK_DIR])
check_call([hg, 'update', '-C', update], cwd=WORK_DIR)
return WORK_DIR
def svn_source(meta):
''' Download a source from SVN repo. '''
def parse_bool(s):
return str(s).lower().strip() in ('yes', 'true', '1', 'on')
svn = external.find_executable('svn')
if not svn:
sys.exit("Error: svn is not installed")
svn_url = meta['svn_url']
svn_revision = meta.get('svn_rev') or 'head'
svn_ignore_externals = parse_bool(meta.get('svn_ignore_externals') or 'no')
if not isdir(SVN_CACHE):
os.makedirs(SVN_CACHE)
svn_dn = svn_url.split(':', 1)[-1].replace('/', '_').replace(':', '_')
cache_repo = join(SVN_CACHE, svn_dn)
if svn_ignore_externals:
extra_args = ['--ignore-externals']
else:
extra_args = []
if isdir(cache_repo):
check_call([svn, 'up', '-r', svn_revision] + extra_args, cwd=cache_repo)
else:
check_call([svn, 'co', '-r', svn_revision] + extra_args + [svn_url,
cache_repo])
assert isdir(cache_repo)
# now copy into work directory
copytree(cache_repo, WORK_DIR, symlinks=True)
return WORK_DIR
def _ensure_unix_line_endings(path):
"""Replace windows line endings with Unix. Return path to modified file."""
out_path = path + "_unix"
with open(path) as inputfile:
with open(out_path, "w") as outputfile:
for line in inputfile:
outputfile.write(line.replace("\r\n", "\n"))
return out_path
def apply_patch(src_dir, path):
print('Applying patch: %r' % path)
if not isfile(path):
sys.exit('Error: no such patch: %s' % path)
patch = external.find_executable('patch')
if patch is None:
sys.exit("""\
Error:
Did not find 'patch' in: %s
You can install 'patch' using apt-get, yum (Linux), Xcode (MacOSX),
or conda, cygwin (Windows),
""" % (os.pathsep.join(external.dir_paths)))
patch_args = ['-p0', '-i', path]
if sys.platform == 'win32':
patch_args[-1] = _ensure_unix_line_endings(path)
try:
check_call([patch] + patch_args, cwd=src_dir)
except CalledProcessError:
sys.exit(1)
if sys.platform == 'win32' and os.path.exists(patch_args[-1]):
os.remove(patch_args[-1]) # clean up .patch_unix file
def provide(recipe_dir, meta, patch=True):
"""
given a recipe_dir:
- download (if necessary)
- unpack
- apply patches (if any)
"""
print("Removing old work directory")
rm_rf(WORK_DIR)
if 'fn' in meta:
unpack(meta)
elif 'git_url' in meta:
git_source(meta, recipe_dir)
elif 'hg_url' in meta:
hg_source(meta)
elif 'svn_url' in meta:
svn_source(meta)
elif 'path' in meta:
print("Copying %s to %s" % (abspath(join(recipe_dir, meta.get('path'))), WORK_DIR))
copytree(abspath(join(recipe_dir, meta.get('path'))), WORK_DIR)
else: # no source
os.makedirs(WORK_DIR)
if patch:
src_dir = get_dir()
for patch in meta.get('patches', []):
apply_patch(src_dir, join(recipe_dir, patch))
if __name__ == '__main__':
print(provide('.',
{'url': 'http://pypi.python.org/packages/source/b/bitarray/bitarray-0.8.0.tar.gz',
'git_url': 'git@github.com:ilanschnell/bitarray.git',
'git_tag': '0.5.2'}))
| {
"repo_name": "sandhujasmine/conda-build",
"path": "conda_build/source.py",
"copies": "1",
"size": "9958",
"license": "bsd-3-clause",
"hash": 42665194595769784,
"line_mean": 32.1933333333,
"line_max": 100,
"alpha_frac": 0.5726049408,
"autogenerated": false,
"ratio": 3.4516464471403814,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9517770720485395,
"avg_score": 0.0012961334909972189,
"num_lines": 300
} |
from __future__ import absolute_import, division, print_function
import os
import sys
from os.path import join, isdir, isfile, abspath, expanduser
from shutil import copytree, ignore_patterns, copy2
from subprocess import check_call, Popen, PIPE
from conda.fetch import download
from conda.utils import hashsum_file
from conda_build import external
from conda_build.config import config
from conda_build.utils import rm_rf, tar_xf, unzip
SRC_CACHE = join(config.croot, 'src_cache')
GIT_CACHE = join(config.croot, 'git_cache')
HG_CACHE = join(config.croot, 'hg_cache')
SVN_CACHE = join(config.croot, 'svn_cache')
WORK_DIR = join(config.croot, 'work')
def get_dir():
if not isdir(WORK_DIR):
os.makedirs(WORK_DIR)
lst = [fn for fn in os.listdir(WORK_DIR) if not fn.startswith('.')]
if len(lst) == 1:
dir_path = join(WORK_DIR, lst[0])
if isdir(dir_path):
return dir_path
return WORK_DIR
def download_to_cache(meta):
''' Download a source to the local cache. '''
print('Source cache directory is: %s' % SRC_CACHE)
if not isdir(SRC_CACHE):
os.makedirs(SRC_CACHE)
fn = meta['fn']
path = join(SRC_CACHE, fn)
if isfile(path):
print('Found source in cache: %s' % fn)
else:
print('Downloading source to cache: %s' % fn)
download(meta['url'], path)
for tp in 'md5', 'sha1', 'sha256':
if meta.get(tp) and hashsum_file(path, tp) != meta[tp]:
raise RuntimeError("%s mismatch: '%s' != '%s'" %
(tp.upper(), hashsum_file(path, tp), meta[tp]))
return path
def unpack(meta):
''' Uncompress a downloaded source. '''
src_path = download_to_cache(meta)
os.makedirs(WORK_DIR)
print("Extracting download")
if src_path.lower().endswith(('.tar.gz', '.tar.bz2', '.tgz', '.tar.xz', '.tar')):
tar_xf(src_path, WORK_DIR)
elif src_path.lower().endswith('.zip'):
unzip(src_path, WORK_DIR)
else:
# In this case, the build script will need to deal with unpacking the source
print("Warning: Unrecognized source format. Source file will be copied to the SRC_DIR")
copy2(src_path, WORK_DIR)
def git_source(meta, recipe_dir):
''' Download a source from Git repo. '''
if not isdir(GIT_CACHE):
os.makedirs(GIT_CACHE)
git = external.find_executable('git')
if not git:
sys.exit("Error: git is not installed")
git_url = meta['git_url']
if git_url.startswith('.'):
# It's a relative path from the conda recipe
os.chdir(recipe_dir)
git_dn = abspath(expanduser(git_url))
git_dn = "_".join(git_dn.split(os.path.sep)[1:])
else:
git_dn = git_url.split(':')[-1].replace('/', '_')
cache_repo = cache_repo_arg = join(GIT_CACHE, git_dn)
if sys.platform == 'win32':
cache_repo_arg = cache_repo_arg.replace('\\', '/')
if os.getenv('USERNAME') == 'builder':
cache_repo_arg = '/cygdrive/c/' + cache_repo_arg[3:]
# update (or create) the cache repo
if isdir(cache_repo):
check_call([git, 'fetch'], cwd=cache_repo)
else:
check_call([git, 'clone', '--mirror', git_url, cache_repo_arg], cwd=recipe_dir)
assert isdir(cache_repo)
# now clone into the work directory
checkout = meta.get('git_rev')
# if rev is not specified, and the git_url is local,
# assume the user wants the current HEAD
if not checkout and git_url.startswith('.'):
process = Popen(["git", "rev-parse", "HEAD"],
stdout=PIPE, stderr=PIPE,
cwd=git_url)
output = process.communicate()[0].strip()
checkout = output.decode('utf-8')
if checkout:
print('checkout: %r' % checkout)
check_call([git, 'clone', cache_repo_arg, WORK_DIR])
if checkout:
check_call([git, 'checkout', checkout], cwd=WORK_DIR)
git_info()
return WORK_DIR
def git_info(fo=None):
''' Print info about a Git repo. '''
assert isdir(WORK_DIR)
    # Explicitly set GIT_DIR, as some Linux machines will not
    # execute properly without it.
env = os.environ.copy()
env['GIT_DIR'] = join(WORK_DIR, '.git')
env = {str(key): str(value) for key, value in env.items()}
for cmd, check_error in [
('git log -n1', True),
('git describe --tags --dirty', False),
('git status', True)]:
p = Popen(cmd.split(), stdout=PIPE, stderr=PIPE, cwd=WORK_DIR, env=env)
stdout, stderr = p.communicate()
stdout = stdout.decode('utf-8')
stderr = stderr.decode('utf-8')
if check_error and stderr and stderr.strip():
raise Exception("git error: %s" % stderr)
if fo:
fo.write(u'==> %s <==\n' % cmd)
fo.write(stdout + u'\n')
else:
print(u'==> %s <==\n' % cmd)
print(stdout + u'\n')
def hg_source(meta):
''' Download a source from Mercurial repo. '''
hg = external.find_executable('hg')
if not hg:
sys.exit('Error: hg not installed')
hg_url = meta['hg_url']
if not isdir(HG_CACHE):
os.makedirs(HG_CACHE)
hg_dn = hg_url.split(':')[-1].replace('/', '_')
cache_repo = join(HG_CACHE, hg_dn)
if isdir(cache_repo):
check_call([hg, 'pull'], cwd=cache_repo)
else:
check_call([hg, 'clone', hg_url, cache_repo])
assert isdir(cache_repo)
    # now clone into the work directory
update = meta.get('hg_tag') or 'tip'
print('checkout: %r' % update)
check_call([hg, 'clone', cache_repo, WORK_DIR])
check_call([hg, 'update', '-C', update], cwd=WORK_DIR)
return WORK_DIR
def svn_source(meta):
''' Download a source from SVN repo. '''
def parse_bool(s):
return str(s).lower().strip() in ('yes', 'true', '1', 'on')
svn = external.find_executable('svn')
if not svn:
sys.exit("Error: svn is not installed")
svn_url = meta['svn_url']
svn_revision = meta.get('svn_rev') or 'head'
svn_ignore_externals = parse_bool(meta.get('svn_ignore_externals') or 'no')
if not isdir(SVN_CACHE):
os.makedirs(SVN_CACHE)
svn_dn = svn_url.split(':', 1)[-1].replace('/', '_').replace(':', '_')
cache_repo = join(SVN_CACHE, svn_dn)
if svn_ignore_externals:
extra_args = ['--ignore-externals']
else:
extra_args = []
if isdir(cache_repo):
check_call([svn, 'up', '-r', svn_revision] + extra_args, cwd=cache_repo)
else:
check_call([svn, 'co', '-r', svn_revision] + extra_args + [svn_url,
cache_repo])
assert isdir(cache_repo)
# now copy into work directory
copytree(cache_repo, WORK_DIR)
return WORK_DIR
def apply_patch(src_dir, path):
print('Applying patch: %r' % path)
if not isfile(path):
sys.exit('Error: no such patch: %s' % path)
patch = external.find_executable('patch')
if patch is None:
sys.exit("""\
Error:
Did not find 'patch' in: %s
You can install 'patch' using apt-get, yum (Linux), Xcode (MacOSX),
or conda, cygwin (Windows),
""" % (os.pathsep.join(external.dir_paths)))
check_call([patch, '-p0', '-i', path], cwd=src_dir)
def provide(recipe_dir, meta, patch=True):
"""
given a recipe_dir:
- download (if necessary)
- unpack
- apply patches (if any)
"""
print("Removing old work directory")
rm_rf(WORK_DIR)
if 'fn' in meta:
unpack(meta)
elif 'git_url' in meta:
git_source(meta, recipe_dir)
elif 'hg_url' in meta:
hg_source(meta)
elif 'svn_url' in meta:
svn_source(meta)
elif 'path' in meta:
print("Copying %s to %s" % (abspath(join(recipe_dir, meta.get('path'))), WORK_DIR))
copytree(abspath(join(recipe_dir, meta.get('path'))), WORK_DIR)
else: # no source
os.makedirs(WORK_DIR)
if patch:
src_dir = get_dir()
for patch in meta.get('patches', []):
apply_patch(src_dir, join(recipe_dir, patch))
if __name__ == '__main__':
print(provide('.',
{'url': 'http://pypi.python.org/packages/source/b/bitarray/bitarray-0.8.0.tar.gz',
'git_url': 'git@github.com:ilanschnell/bitarray.git',
'git_tag': '0.5.2'}))
| {
"repo_name": "takluyver/conda-build",
"path": "conda_build/source.py",
"copies": "1",
"size": "8418",
"license": "bsd-3-clause",
"hash": -7404552572830516000,
"line_mean": 31.7548638132,
"line_max": 100,
"alpha_frac": 0.5773342837,
"autogenerated": false,
"ratio": 3.3902537253322595,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.44675880090322595,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import os
import sys
import ctypes
import traceback
import idaapi
import idc
from ctypes import (c_int, c_void_p, create_string_buffer, cast)
from PyQt5.QtCore import Qt, QTimer, QObject
from PyQt5.QtGui import QShowEvent
from PyQt5.QtWidgets import QWidget, QDialog, QDialogButtonBox, QPushButton, qApp
__all__ = ['load_clr_file']
def _ida_lib():
ea_name = 'ida64' if idc.__EA64__ else 'ida'
if sys.platform == 'win32':
functype = ctypes.WINFUNCTYPE
lib = ctypes.WinDLL(ea_name)
elif sys.platform == 'darwin':
functype = ctypes.CFUNCTYPE
lib = ctypes.CDLL(idaapi.idadir("lib" + ea_name + ".dylib"))
else:
functype = ctypes.CFUNCTYPE
lib = ctypes.CDLL('lib' + ea_name + '.so')
return functype, lib
functype, lib = _ida_lib()
hook_cb_t = functype(c_void_p, c_void_p, c_int, c_void_p)
hook_to_notification_point = lib.hook_to_notification_point
hook_to_notification_point.argtypes = [c_int, hook_cb_t, c_void_p]
unhook_from_notification_point = lib.unhook_from_notification_point
unhook_from_notification_point.argtypes = [c_int, hook_cb_t, c_void_p]
class TemporaryFilter(QObject):
"""
    Temporary event filter installed on qApp to catch events
    while QDialog::exec is running.
    The filter automatically clicks the &Import button and
    selects the file by using native UI hooks.
"""
def __init__(self, filepath):
super(TemporaryFilter, self).__init__()
filepath = os.path.abspath(filepath)
if not os.path.isfile(filepath):
raise IOError("Assertion Error: os.path.isfile(filepath)")
self.filepath = filepath
def eventFilter(self, obj, event):
def is_colors_dialog():
return isinstance(
obj, QDialog) and 'IDA Colors' in obj.windowTitle()
if isinstance(event, QShowEvent) and is_colors_dialog():
qApp.removeEventFilter(self)
# Hide window and find &Import button
obj.windowHandle().setOpacity(0)
buttons = [widget for widget in obj.children() if isinstance(
widget, QDialogButtonBox)][0]
button = [widget for widget in buttons.buttons() if widget.text()
== '&Import'][0]
with NativeHook(ask_file=self.ask_file_handler):
button.click()
QTimer.singleShot(0, lambda: obj.accept())
return 1
return 0
def ask_file_handler(self):
return create_string_buffer(self.filepath)
class NativeHook:
"""
Installer for non-exposed hooks from UI_Hooks.
This uses hook_to_notification_point with HT_UI.
with NativeHook(ask_file=lambda: 0):
# do anything
"""
NAMES = {
'ask_file': 0x1d
}
HT_UI = 1
def __init__(self, **kwargs):
self.hooks = {NativeHook.NAMES[key]: value for key, value in kwargs.items()}
self._handler = hook_cb_t(self.handler)
def handler(self, _user_data, code, _va_args):
if code in self.hooks:
try:
res = self.hooks[code]()
return cast(res, c_void_p).value
except:
traceback.print_exc()
return 0
else:
return 0
def __enter__(self):
hook_to_notification_point(NativeHook.HT_UI, self._handler, None)
def __exit__(self, *args):
unhook_from_notification_point(NativeHook.HT_UI, self._handler, None)
def load_clr_file(filepath):
event_filter = TemporaryFilter(filepath)
qApp.installEventFilter(event_filter)
return idaapi.process_ui_action('SetColors')
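# Usage sketch (the .clr path is hypothetical; requires a running IDA Qt UI):
#     load_clr_file(os.path.join(os.path.dirname(__file__), 'idaskins.clr'))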
| {
"repo_name": "zyantific/IDASkins",
"path": "plugins/idaskins/clrapplier.py",
"copies": "1",
"size": "3725",
"license": "mit",
"hash": -6940119221450813000,
"line_mean": 28.8,
"line_max": 84,
"alpha_frac": 0.6238926174,
"autogenerated": false,
"ratio": 3.6235408560311284,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9744475080317785,
"avg_score": 0.0005916786226685797,
"num_lines": 125
} |
from __future__ import absolute_import, division, print_function
import os
import sys
import random
import subprocess
import socket
import time
import blaze
import datashape
import unittest
from blaze.catalog.tests.catalog_harness import CatalogHarness
from blaze.datadescriptor import dd_as_py, RemoteDataDescriptor
class TestServer(unittest.TestCase):
def startServer(self):
# Start the server
serverpy = os.path.join(os.path.dirname(__file__),
'start_simple_server.py')
for attempt in range(2):
self.port = 10000 + random.randrange(30000)
cflags = 0
exe = sys.executable
if sys.platform == 'win32':
if sys.version_info[:2] > (2, 6):
cflags |= subprocess.CREATE_NEW_PROCESS_GROUP
                # Make sure we use Python.exe, not Pythonw.exe (to work around
                # errors in numba in a GUI context)
exe = os.path.join(os.path.dirname(exe), 'Python.exe')
self.proc = subprocess.Popen([sys.executable,
serverpy,
self.cat.catfile,
str(self.port)],
executable=exe,
creationflags=cflags)
for i in range(30):
time.sleep(0.2)
if self.proc.poll() is not None:
break
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
if s.connect_ex(('127.0.0.1',self.port)) == 0:
s.close()
return
s.close()
print("Couldn't start Blaze test server attempt %d" % attempt)
self.proc.terminate()
raise RuntimeError('Failed to start the test Blaze server')
def setUp(self):
self.cat = CatalogHarness()
# Load the test catalog for comparison with the server
blaze.catalog.load_config(self.cat.catfile)
self.startServer()
self.baseurl = 'http://localhost:%d' % self.port
def tearDown(self):
self.proc.terminate()
blaze.catalog.load_default()
self.cat.close()
def test_get_arr(self):
ra = blaze.array(RemoteDataDescriptor('%s/csv_arr' % self.baseurl))
la = blaze.catalog.get('/csv_arr')
self.assertEqual(la.dshape, ra.dshape)
self.assertEqual(dd_as_py(la._data), dd_as_py(blaze.eval(ra)._data))
def test_compute(self):
ra = blaze.array(RemoteDataDescriptor('%s/py_arr' % self.baseurl))
result = ra + 1
result = blaze.eval(result)
self.assertEqual(result.dshape, datashape.dshape('5, int32'))
self.assertEqual(dd_as_py(result._data), [2, 3, 4, 5, 6])
if __name__ == '__main__':
unittest.main()
| {
"repo_name": "zzmjohn/blaze",
"path": "blaze/io/server/tests/test_server.py",
"copies": "7",
"size": "2901",
"license": "bsd-3-clause",
"hash": -836406605611010000,
"line_mean": 36.1923076923,
"line_max": 76,
"alpha_frac": 0.5480868666,
"autogenerated": false,
"ratio": 4.012448132780083,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8060534999380082,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import os
import sys
import shutil
from os.path import dirname, isdir, isfile, join, exists
import conda.config as cc
from conda.compat import iteritems
from conda_build.config import config
from conda_build import environ
from conda_build import source
from conda_build.utils import _check_call
try:
import psutil
except ImportError:
psutil = None
assert sys.platform == 'win32'
def fix_staged_scripts():
"""
Fixes scripts which have been installed unix-style to have a .bat
helper
"""
scripts_dir = join(config.build_prefix, 'Scripts')
if not isdir(scripts_dir):
return
for fn in os.listdir(scripts_dir):
# process all the extensionless files
if not isfile(join(scripts_dir, fn)) or '.' in fn:
continue
with open(join(scripts_dir, fn)) as f:
line = f.readline().lower()
# If it's a #!python script
if not (line.startswith('#!') and 'python' in line.lower()):
continue
print('Adjusting unix-style #! script %s, '
'and adding a .bat file for it' % fn)
# copy it with a .py extension (skipping that first #! line)
with open(join(scripts_dir, fn + '-script.py'), 'w') as fo:
fo.write(f.read())
# now create the .exe file
shutil.copyfile(join(dirname(__file__),
'cli-%d.exe' % (8 * tuple.__itemsize__)),
join(scripts_dir, fn + '.exe'))
# remove the original script
os.remove(join(scripts_dir, fn))
def msvc_env_cmd():
if 'ProgramFiles(x86)' in os.environ:
program_files = os.environ['ProgramFiles(x86)']
else:
program_files = os.environ['ProgramFiles']
if config.PY3K:
vcvarsall = os.path.join(program_files,
r'Microsoft Visual Studio 10.0'
r'\VC\vcvarsall.bat')
else:
vcvarsall = os.path.join(program_files,
r'Microsoft Visual Studio 9.0'
r'\VC\vcvarsall.bat')
if not isfile(vcvarsall):
print("Warning: Couldn't find Visual Studio: %r" % vcvarsall)
return ''
return '''\
call "%s" %s
''' % (vcvarsall, {32: 'x86', 64: 'amd64'}[cc.bits])
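# For reference, the snippet returned on a 64-bit Python 2 build looks roughly like
# (the Visual Studio path depends on the machine):
#     call "C:\Program Files (x86)\Microsoft Visual Studio 9.0\VC\vcvarsall.bat" amd64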
def kill_processes():
if psutil is None:
return
for n in psutil.get_pid_list():
try:
p = psutil.Process(n)
if p.name.lower() == 'msbuild.exe':
print('Terminating:', p.name)
p.terminate()
except:
continue
def build(m):
env = dict(os.environ)
env.update(environ.get_dict(m))
for name in 'BIN', 'INC', 'LIB':
path = env['LIBRARY_' + name]
if not isdir(path):
os.makedirs(path)
src_dir = source.get_dir()
bld_bat = join(m.path, 'bld.bat')
if exists(bld_bat):
with open(bld_bat) as fi:
data = fi.read()
with open(join(src_dir, 'bld.bat'), 'w') as fo:
fo.write(msvc_env_cmd())
# more debuggable with echo on
fo.write('@echo on\n')
for kv in iteritems(env):
fo.write('set %s=%s\n' % kv)
fo.write("REM ===== end generated header =====\n")
fo.write(data)
cmd = [os.environ['COMSPEC'], '/c', 'bld.bat']
_check_call(cmd, cwd=src_dir)
kill_processes()
fix_staged_scripts()
| {
"repo_name": "takluyver/conda-build",
"path": "conda_build/windows.py",
"copies": "2",
"size": "3589",
"license": "bsd-3-clause",
"hash": -2797613596972959000,
"line_mean": 29.1596638655,
"line_max": 74,
"alpha_frac": 0.5408191697,
"autogenerated": false,
"ratio": 3.730769230769231,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0005252100840336134,
"num_lines": 119
} |
from __future__ import absolute_import, division, print_function
import os
import sys
import shutil
import requests
import logging
import zipfile
from subprocess import Popen, PIPE
from .exceptions import CondaException
from .utils import shell_out
mini_file = "Miniconda-latest.sh"
miniconda_urls = {"linux": "https://repo.continuum.io/miniconda/Miniconda-latest-Linux-x86_64.sh",
"darwin": "https://repo.continuum.io/miniconda/Miniconda-latest-MacOSX-x86_64.sh",
"win": "https://repo.continuum.io/miniconda/Miniconda-latest-Windows-x86_64.exe"
}
logger = logging.getLogger(__name__)
class CondaCreator(object):
"""
Create Conda Env
"""
def __init__(self, conda_root=None):
self.conda_dir = os.path.join(os.path.dirname(__file__), 'tmp_conda')
self.minifile_fp = os.path.join(self.conda_dir, mini_file)
self.conda_root = conda_root or os.path.join(self.conda_dir, 'miniconda')
self.python_bin = os.path.join(self.conda_root, 'bin', 'python')
self.conda_envs = os.path.join(self.conda_root, 'envs')
self.conda_bin = os.path.join(self.conda_root, 'bin', 'conda')
@property
def miniconda_url(self):
if sys.platform.startswith('linux'):
url = miniconda_urls['linux']
elif sys.platform.startswith('darwin'):
url = miniconda_urls['darwin']
else:
url = miniconda_urls['win']
# 64bit check
if not sys.maxsize > 2**32:
url = url.replace("_64", "")
return url
@property
def miniconda_check(self):
return os.path.exists(self.conda_root)
def _download_miniconda(self):
if not os.path.exists(self.conda_dir):
os.mkdir(self.conda_dir)
mini_file = os.path.join(self.conda_dir, self.minifile_fp)
if os.path.exists(mini_file):
return mini_file
logger.debug("Downloading latest Miniconda.sh")
r = requests.get(self.miniconda_url, stream=True)
with open(mini_file, 'wb') as f:
for chunk in r.iter_content(chunk_size=1024):
if chunk:
f.write(chunk)
f.flush()
return os.path.abspath(mini_file)
def _install_miniconda(self):
"""
        Install Miniconda.
        Returns a truthy value if Miniconda was installed successfully or a
        previous installation was found.
"""
if self.miniconda_check:
return self.conda_root
install_cmd = "bash {0} -b -p {1}".format(self.minifile_fp, self.conda_root).split()
self._download_miniconda()
logger.debug("Installing Miniconda in {0}".format(self.conda_root))
proc = Popen(install_cmd, stdout=PIPE, stderr=PIPE)
out, err = proc.communicate()
logger.debug(out)
logger.debug(err)
return os.path.exists(self.python_bin)
def _create_env(self, env_name, packages=None, remove=False):
"""
        Create a conda environment.
Parameters
----------
env_name : str
packages : list
remove : bool
remove environment should it exist
Returns
-------
path : str
path to newly created conda environment
"""
# ensure miniconda is installed
self._install_miniconda()
env_path = os.path.join(self.conda_root, 'envs', env_name)
if os.path.exists(env_path):
conda_list = shell_out([self.conda_bin, 'list', '-n', env_name]).split()
            # filter out python/python=3
            pkgs = [p for p in packages if 'python' not in p]
            # try to be idempotent -- if the requested packages already exist, don't recreate
            if any(p in conda_list for p in pkgs):
return env_path
if not remove:
raise CondaException("Conda environment: {0} already exists".format(env_name))
else:
shutil.rmtree(env_path)
if not isinstance(packages, list):
raise TypeError("Packages must be a list of strings")
cmd = [self.conda_bin, 'create', '-p', env_path, '--copy', '-y', '-q'] + packages
logger.info("Creating new env {0}".format(env_name))
logger.info(' '.join(cmd))
proc = Popen(cmd, stdout=PIPE, stderr=PIPE)
out, err = proc.communicate()
logger.debug(out)
logger.debug(err)
env_python = os.path.join(env_path, 'bin', 'python')
if not os.path.exists(env_python):
raise CondaException("Failed to create Python binary.")
return env_path
def find_env(self, env_name):
"""
Find full path to env_name
Parameters
----------
env_name : str
Returns
-------
path : str
path to conda environment
"""
env_path = os.path.join(self.conda_root, 'envs', env_name)
if os.path.exists(env_path):
return env_path
def create_env(self, env_name, packages=None, remove=False):
"""
        Create a zipped directory of a conda environment
Parameters
----------
env_name : str
packages : list
remove : bool
remove environment should it exist
Returns
-------
path : str
path to zipped conda environment
"""
if not packages:
env_path = self.find_env(env_name)
else:
env_path = self._create_env(env_name, packages, remove)
return self.zip_env(env_path)
def zip_env(self, env_path):
"""
Zip env directory
Parameters
----------
env_path : string
Returns
-------
path : string
path to zipped file
"""
fname = os.path.basename(env_path) + '.zip'
env_dir = os.path.dirname(env_path)
zFile = os.path.join(env_dir, fname)
# ZipFile does not have a contextmanager in Python 2.6
f = zipfile.ZipFile(zFile, 'w')
try:
for root, dirs, files in os.walk(env_path):
for file in files:
relfile = os.path.join(os.path.relpath(root, self.conda_envs), file)
absfile = os.path.join(root, file)
f.write(absfile, relfile)
return zFile
finally:
f.close()
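# Usage sketch (env name and package list are illustrative):
#     creator = CondaCreator()
#     env_zip = creator.create_env('example_env', packages=['python=3', 'numpy'])
#     print(env_zip)  # path to <conda_root>/envs/example_env.zip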
| {
"repo_name": "NielsZeilemaker/knit",
"path": "knit/env.py",
"copies": "1",
"size": "6466",
"license": "bsd-3-clause",
"hash": 3025558000420495400,
"line_mean": 27.4845814978,
"line_max": 99,
"alpha_frac": 0.559696876,
"autogenerated": false,
"ratio": 3.862604540023895,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4922301416023895,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import os
import sys
import shutil
import tarfile
import zipfile
import subprocess
from os.path import dirname, getmtime, getsize, isdir, join
from conda.utils import md5_file
from conda.compat import PY3
from conda_build import external
# Backwards compatibility import. Do not remove.
from conda.install import rm_rf
def copy_into(src, dst):
"Copy all the files and directories in src to the directory dst"
tocopy = os.listdir(src)
for afile in tocopy:
srcname = os.path.join(src, afile)
dstname = os.path.join(dst, afile)
if os.path.isdir(srcname):
shutil.copytree(srcname, dstname)
else:
shutil.copy2(srcname, dstname)
def relative(f, d='lib'):
assert not f.startswith('/'), f
assert not d.startswith('/'), d
d = d.strip('/').split('/')
if d == ['.']:
d = []
f = dirname(f).split('/')
if f == ['']:
f = []
while d and f and d[0] == f[0]:
d.pop(0)
f.pop(0)
return '/'.join(((['..'] * len(f)) if f else ['.']) + d)
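# Worked examples for relative(), following the logic above:
#     relative('lib/python2.7/site-packages/foo.so', 'lib')  -> '../..'
#     relative('bin/python', 'lib')                          -> '../lib'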
def _check_call(args, **kwargs):
try:
subprocess.check_call(args, **kwargs)
except subprocess.CalledProcessError:
sys.exit('Command failed: %s' % ' '.join(args))
def tar_xf(tarball, dir_path, mode='r:*'):
if not PY3 and tarball.endswith('.tar.xz'):
unxz = external.find_executable('unxz')
if not unxz:
sys.exit("""\
unxz is required to unarchive .xz source files.
""")
subprocess.check_call([unxz, '-f', '-k', tarball])
tarball = tarball[:-3]
t = tarfile.open(tarball, mode)
t.extractall(path=dir_path)
t.close()
def unzip(zip_path, dir_path):
z = zipfile.ZipFile(zip_path)
for name in z.namelist():
if name.endswith('/'):
continue
path = join(dir_path, *name.split('/'))
dp = dirname(path)
if not isdir(dp):
os.makedirs(dp)
with open(path, 'wb') as fo:
fo.write(z.read(name))
z.close()
def file_info(path):
return {'size': getsize(path),
'md5': md5_file(path),
'mtime': getmtime(path)}
| {
"repo_name": "takluyver/conda-build",
"path": "conda_build/utils.py",
"copies": "1",
"size": "2219",
"license": "bsd-3-clause",
"hash": 6269701412845278000,
"line_mean": 24.5057471264,
"line_max": 68,
"alpha_frac": 0.5822442542,
"autogenerated": false,
"ratio": 3.4086021505376345,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.44908464047376345,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import os
import sys
import shutil
import tarfile
import zipfile
import subprocess
import operator
from os.path import dirname, getmtime, getsize, isdir, join
from collections import defaultdict
from conda.utils import md5_file
from conda.compat import PY3, iteritems
from conda_build import external
# Backwards compatibility import. Do not remove.
from conda.install import rm_rf
rm_rf
def copy_into(src, dst):
"Copy all the files and directories in src to the directory dst"
if not isdir(src):
tocopy = [src]
else:
tocopy = os.listdir(src)
for afile in tocopy:
srcname = os.path.join(src, afile)
dstname = os.path.join(dst, afile)
if os.path.isdir(srcname):
shutil.copytree(srcname, dstname)
else:
shutil.copy2(srcname, dstname)
def relative(f, d='lib'):
assert not f.startswith('/'), f
assert not d.startswith('/'), d
d = d.strip('/').split('/')
if d == ['.']:
d = []
f = dirname(f).split('/')
if f == ['']:
f = []
while d and f and d[0] == f[0]:
d.pop(0)
f.pop(0)
return '/'.join(((['..'] * len(f)) if f else ['.']) + d)
def _check_call(args, **kwargs):
try:
subprocess.check_call(args, **kwargs)
except subprocess.CalledProcessError:
sys.exit('Command failed: %s' % ' '.join(args))
def tar_xf(tarball, dir_path, mode='r:*'):
if tarball.lower().endswith('.tar.z'):
uncompress = external.find_executable('uncompress')
if not uncompress:
uncompress = external.find_executable('gunzip')
if not uncompress:
sys.exit("""\
uncompress (or gunzip) is required to unarchive .z source files.
""")
subprocess.check_call([uncompress, '-f', tarball])
tarball = tarball[:-2]
if not PY3 and tarball.endswith('.tar.xz'):
unxz = external.find_executable('unxz')
if not unxz:
sys.exit("""\
unxz is required to unarchive .xz source files.
""")
subprocess.check_call([unxz, '-f', '-k', tarball])
tarball = tarball[:-3]
t = tarfile.open(tarball, mode)
t.extractall(path=dir_path)
t.close()
def unzip(zip_path, dir_path):
z = zipfile.ZipFile(zip_path)
for name in z.namelist():
if name.endswith('/'):
continue
path = join(dir_path, *name.split('/'))
dp = dirname(path)
if not isdir(dp):
os.makedirs(dp)
with open(path, 'wb') as fo:
fo.write(z.read(name))
z.close()
def file_info(path):
return {'size': getsize(path),
'md5': md5_file(path),
'mtime': getmtime(path)}
# Taken from toolz
def groupby(key, seq):
""" Group a collection by a key function
>>> names = ['Alice', 'Bob', 'Charlie', 'Dan', 'Edith', 'Frank']
>>> groupby(len, names) # doctest: +SKIP
{3: ['Bob', 'Dan'], 5: ['Alice', 'Edith', 'Frank'], 7: ['Charlie']}
>>> iseven = lambda x: x % 2 == 0
>>> groupby(iseven, [1, 2, 3, 4, 5, 6, 7, 8]) # doctest: +SKIP
{False: [1, 3, 5, 7], True: [2, 4, 6, 8]}
Non-callable keys imply grouping on a member.
>>> groupby('gender', [{'name': 'Alice', 'gender': 'F'},
... {'name': 'Bob', 'gender': 'M'},
... {'name': 'Charlie', 'gender': 'M'}]) # doctest:+SKIP
{'F': [{'gender': 'F', 'name': 'Alice'}],
'M': [{'gender': 'M', 'name': 'Bob'},
{'gender': 'M', 'name': 'Charlie'}]}
See Also:
countby
"""
if not callable(key):
key = getter(key)
d = defaultdict(lambda: [].append)
for item in seq:
d[key(item)](item)
rv = {}
for k, v in iteritems(d):
rv[k] = v.__self__
return rv
def getter(index):
if isinstance(index, list):
if len(index) == 1:
index = index[0]
return lambda x: (x[index],)
elif index:
return operator.itemgetter(*index)
else:
return lambda x: ()
else:
return operator.itemgetter(index)
def comma_join(items):
"""
    Like ', '.join(items) but with 'and' before the last item
Examples:
>>> comma_join(['a'])
'a'
>>> comma_join(['a', 'b'])
'a and b'
    >>> comma_join(['a', 'b', 'c'])
'a, b, and c'
"""
return ' and '.join(items) if len(items) <= 2 else ', '.join(items[:-1]) + ', and ' + items[-1]
def safe_print_unicode(*args, **kwargs):
"""
prints unicode strings to stdout using configurable `errors` handler for
encoding errors
:param args: unicode strings to print to stdout
:param sep: separator (defaults to ' ')
:param end: ending character (defaults to '\n')
:param errors: error handler for encoding errors (defaults to 'replace')
"""
sep = kwargs.pop('sep', u' ')
end = kwargs.pop('end', u'\n')
errors = kwargs.pop('errors', 'replace')
if PY3:
func = sys.stdout.buffer.write
else:
func = sys.stdout.write
line = sep.join(args) + end
encoding = sys.stdout.encoding or 'utf8'
func(line.encode(encoding, errors))
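# Usage sketch: print text the console encoding may not be able to represent.
#     safe_print_unicode(u'building package \u2013 step 1')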
| {
"repo_name": "sandhujasmine/conda-build",
"path": "conda_build/utils.py",
"copies": "1",
"size": "5174",
"license": "bsd-3-clause",
"hash": 3072997189865280000,
"line_mean": 27.4285714286,
"line_max": 99,
"alpha_frac": 0.5566293003,
"autogenerated": false,
"ratio": 3.449333333333333,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4505962633633333,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import os
import sys
import shutil
import tarfile
import zipfile
import subprocess
import operator
import fnmatch
from os.path import dirname, getmtime, getsize, isdir, join
from collections import defaultdict
from conda.utils import md5_file
from conda.compat import PY3, iteritems
from conda_build import external
# Backwards compatibility import. Do not remove.
from conda.install import rm_rf
rm_rf
def copy_into(src, dst):
"Copy all the files and directories in src to the directory dst"
if not isdir(src):
tocopy = [src]
else:
tocopy = os.listdir(src)
for afile in tocopy:
srcname = os.path.join(src, afile)
dstname = os.path.join(dst, afile)
if os.path.isdir(srcname):
shutil.copytree(srcname, dstname)
else:
shutil.copy2(srcname, dstname)
def relative(f, d='lib'):
assert not f.startswith('/'), f
assert not d.startswith('/'), d
d = d.strip('/').split('/')
if d == ['.']:
d = []
f = dirname(f).split('/')
if f == ['']:
f = []
while d and f and d[0] == f[0]:
d.pop(0)
f.pop(0)
return '/'.join(((['..'] * len(f)) if f else ['.']) + d)
def _check_call(args, **kwargs):
try:
subprocess.check_call(args, **kwargs)
except subprocess.CalledProcessError:
sys.exit('Command failed: %s' % ' '.join(args))
def tar_xf(tarball, dir_path, mode='r:*'):
if tarball.lower().endswith('.tar.z'):
uncompress = external.find_executable('uncompress')
if not uncompress:
uncompress = external.find_executable('gunzip')
if not uncompress:
sys.exit("""\
uncompress (or gunzip) is required to unarchive .z source files.
""")
subprocess.check_call([uncompress, '-f', tarball])
tarball = tarball[:-2]
if not PY3 and tarball.endswith('.tar.xz'):
unxz = external.find_executable('unxz')
if not unxz:
sys.exit("""\
unxz is required to unarchive .xz source files.
""")
subprocess.check_call([unxz, '-f', '-k', tarball])
tarball = tarball[:-3]
t = tarfile.open(tarball, mode)
t.extractall(path=dir_path)
t.close()
def unzip(zip_path, dir_path):
z = zipfile.ZipFile(zip_path)
for name in z.namelist():
if name.endswith('/'):
continue
path = join(dir_path, *name.split('/'))
dp = dirname(path)
if not isdir(dp):
os.makedirs(dp)
with open(path, 'wb') as fo:
fo.write(z.read(name))
z.close()
def file_info(path):
return {'size': getsize(path),
'md5': md5_file(path),
'mtime': getmtime(path)}
# Taken from toolz
def groupby(key, seq):
""" Group a collection by a key function
>>> names = ['Alice', 'Bob', 'Charlie', 'Dan', 'Edith', 'Frank']
>>> groupby(len, names) # doctest: +SKIP
{3: ['Bob', 'Dan'], 5: ['Alice', 'Edith', 'Frank'], 7: ['Charlie']}
>>> iseven = lambda x: x % 2 == 0
>>> groupby(iseven, [1, 2, 3, 4, 5, 6, 7, 8]) # doctest: +SKIP
{False: [1, 3, 5, 7], True: [2, 4, 6, 8]}
Non-callable keys imply grouping on a member.
>>> groupby('gender', [{'name': 'Alice', 'gender': 'F'},
... {'name': 'Bob', 'gender': 'M'},
... {'name': 'Charlie', 'gender': 'M'}]) # doctest:+SKIP
{'F': [{'gender': 'F', 'name': 'Alice'}],
'M': [{'gender': 'M', 'name': 'Bob'},
{'gender': 'M', 'name': 'Charlie'}]}
See Also:
countby
"""
if not callable(key):
key = getter(key)
d = defaultdict(lambda: [].append)
for item in seq:
d[key(item)](item)
rv = {}
for k, v in iteritems(d):
rv[k] = v.__self__
return rv
def getter(index):
if isinstance(index, list):
if len(index) == 1:
index = index[0]
return lambda x: (x[index],)
elif index:
return operator.itemgetter(*index)
else:
return lambda x: ()
else:
return operator.itemgetter(index)
def comma_join(items):
"""
    Like ', '.join(items) but with 'and' before the last item
Examples:
>>> comma_join(['a'])
'a'
>>> comma_join(['a', 'b'])
'a and b'
    >>> comma_join(['a', 'b', 'c'])
'a, b, and c'
"""
return ' and '.join(items) if len(items) <= 2 else ', '.join(items[:-1]) + ', and ' + items[-1]
def safe_print_unicode(*args, **kwargs):
"""
    Prints unicode strings to stdout, using a configurable `errors` handler
    for encoding errors.
:param args: unicode strings to print to stdout
:param sep: separator (defaults to ' ')
:param end: ending character (defaults to '\n')
:param errors: error handler for encoding errors (defaults to 'replace')
"""
sep = kwargs.pop('sep', u' ')
end = kwargs.pop('end', u'\n')
errors = kwargs.pop('errors', 'replace')
if PY3:
func = sys.stdout.buffer.write
else:
func = sys.stdout.write
line = sep.join(args) + end
encoding = sys.stdout.encoding or 'utf8'
func(line.encode(encoding, errors))
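# Illustrative sketch, not part of the original module: unlike a bare print(),
# safe_print_unicode() should not raise UnicodeEncodeError when stdout's
# encoding cannot represent a character; by default such characters are
# replaced, and another handler can be passed explicitly:
#
#     safe_print_unicode(u'building \u2713')                # replaced if needed
#     safe_print_unicode(u'a', u'b', sep=u', ', end=u'!\n')
#     safe_print_unicode(u'caf\xe9', errors='ignore')        # drop instead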
def rec_glob(path, patterns):
result = []
for d_f in os.walk(path):
m = []
for pattern in patterns:
m.extend(fnmatch.filter(d_f[2], pattern))
if m:
result.extend([os.path.join(d_f[0], f) for f in m])
return result
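# Illustrative sketch, not part of the original module: rec_glob() walks
# ``path`` recursively and returns the paths of files whose basenames match
# any of the fnmatch-style patterns:
#
#     rec_glob('conda_build', ['*.py'])      # every .py file under conda_build
#     rec_glob('.', ['*.sh', '*.bat'])       # all shell and batch scripts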
| {
"repo_name": "ilastik/conda-build",
"path": "conda_build/utils.py",
"copies": "2",
"size": "5465",
"license": "bsd-3-clause",
"hash": -2602475588564392000,
"line_mean": 27.1701030928,
"line_max": 99,
"alpha_frac": 0.5557182068,
"autogenerated": false,
"ratio": 3.441435768261965,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49971539750619653,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import os
import sys
import shutil
import tarfile
import zipfile
import subprocess
import operator
import time
from os.path import dirname, getmtime, getsize, isdir, join
from collections import defaultdict
from conda.utils import md5_file
from conda.compat import PY3, iteritems
from conda_build import external
# Backwards compatibility import. Do not remove.
from conda.install import rm_rf
rm_rf
ATTEMPTS = 3
RETRY_INTERVAL = 0.1
def copy_into(src, dst):
"Copy all the files and directories in src to the directory dst"
    if not isdir(src):
        # src is a single file: copy it into dst under its own basename
        tocopy = [os.path.basename(src)]
        src = dirname(src) or '.'
    else:
        tocopy = os.listdir(src)
for afile in tocopy:
srcname = os.path.join(src, afile)
dstname = os.path.join(dst, afile)
if os.path.isdir(srcname):
shutil.copytree(srcname, dstname)
else:
shutil.copy2(srcname, dstname)
def relative(f, d='lib'):
assert not f.startswith('/'), f
assert not d.startswith('/'), d
d = d.strip('/').split('/')
if d == ['.']:
d = []
f = dirname(f).split('/')
if f == ['']:
f = []
while d and f and d[0] == f[0]:
d.pop(0)
f.pop(0)
return '/'.join(((['..'] * len(f)) if f else ['.']) + d)
def _check_call(args, **kwargs):
try:
subprocess.check_call(args, **kwargs)
except subprocess.CalledProcessError:
sys.exit('Command failed: %s' % ' '.join(args))
def tar_xf(tarball, dir_path, mode='r:*'):
if tarball.lower().endswith('.tar.z'):
uncompress = external.find_executable('uncompress')
if not uncompress:
sys.exit("""\
uncompress is required to unarchive .z source files.
""")
subprocess.check_call([uncompress, '-f', tarball])
tarball = tarball[:-2]
if not PY3 and tarball.endswith('.tar.xz'):
unxz = external.find_executable('unxz')
if not unxz:
sys.exit("""\
unxz is required to unarchive .xz source files.
""")
subprocess.check_call([unxz, '-f', '-k', tarball])
tarball = tarball[:-3]
t = tarfile.open(tarball, mode)
t.extractall(path=dir_path)
t.close()
def unzip(zip_path, dir_path):
z = zipfile.ZipFile(zip_path)
for name in z.namelist():
if name.endswith('/'):
continue
path = join(dir_path, *name.split('/'))
dp = dirname(path)
if not isdir(dp):
os.makedirs(dp)
with open(path, 'wb') as fo:
fo.write(z.read(name))
z.close()
def file_info(path):
return {'size': getsize(path),
'md5': md5_file(path),
'mtime': getmtime(path)}
# Taken from toolz
def groupby(key, seq):
""" Group a collection by a key function
>>> names = ['Alice', 'Bob', 'Charlie', 'Dan', 'Edith', 'Frank']
>>> groupby(len, names) # doctest: +SKIP
{3: ['Bob', 'Dan'], 5: ['Alice', 'Edith', 'Frank'], 7: ['Charlie']}
>>> iseven = lambda x: x % 2 == 0
>>> groupby(iseven, [1, 2, 3, 4, 5, 6, 7, 8]) # doctest: +SKIP
{False: [1, 3, 5, 7], True: [2, 4, 6, 8]}
Non-callable keys imply grouping on a member.
>>> groupby('gender', [{'name': 'Alice', 'gender': 'F'},
... {'name': 'Bob', 'gender': 'M'},
... {'name': 'Charlie', 'gender': 'M'}]) # doctest:+SKIP
{'F': [{'gender': 'F', 'name': 'Alice'}],
'M': [{'gender': 'M', 'name': 'Bob'},
{'gender': 'M', 'name': 'Charlie'}]}
See Also:
countby
"""
if not callable(key):
key = getter(key)
d = defaultdict(lambda: [].append)
for item in seq:
d[key(item)](item)
rv = {}
for k, v in iteritems(d):
rv[k] = v.__self__
return rv
def getter(index):
if isinstance(index, list):
if len(index) == 1:
index = index[0]
return lambda x: (x[index],)
elif index:
return operator.itemgetter(*index)
else:
return lambda x: ()
else:
return operator.itemgetter(index)
def comma_join(items):
"""
Like ', '.join(items) but with and
Examples:
>>> comma_join(['a'])
'a'
>>> comma_join(['a', 'b'])
'a and b'
    >>> comma_join(['a', 'b', 'c'])
'a, b, and c'
"""
return ' and '.join(items) if len(items) <= 2 else ', '.join(items[:-1]) + ', and ' + items[-1]
def execute(command, **kwargs):
"""Helper method to shell out and execute a command through subprocess.
:param attempts: How many times to retry running the command.
:param binary: On Python 3, return stdout and stderr as bytes if
binary is True, as Unicode otherwise.
:param check_exit_code: Single bool, int, or list of allowed exit
codes. Defaults to [0]. Raise
:class:`CalledProcessError` unless
                            program exits with one of these codes.
:param command: The command passed to the subprocess.Popen.
:param cwd: Set the current working directory
:param env_variables: Environment variables and their values that
will be set for the process.
:param retry_interval: Interval between execute attempts, in seconds
    :param shell: whether to execute the command through a shell.
:raises: :class:`subprocess.CalledProcessError`
"""
# pylint: disable=too-many-locals
attempts = kwargs.pop("attempts", ATTEMPTS)
binary = kwargs.pop('binary', False)
check_exit_code = kwargs.pop('check_exit_code', False)
cwd = kwargs.pop('cwd', None)
env_variables = kwargs.pop("env_variables", None)
retry_interval = kwargs.pop("retry_interval", RETRY_INTERVAL)
shell = kwargs.pop("shell", False)
command = [str(argument) for argument in command]
ignore_exit_code = False
if isinstance(check_exit_code, bool):
ignore_exit_code = not check_exit_code
check_exit_code = [0]
elif isinstance(check_exit_code, int):
check_exit_code = [check_exit_code]
while attempts > 0:
attempts = attempts - 1
try:
process = subprocess.Popen(command,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE, shell=shell,
cwd=cwd, env=env_variables)
result = process.communicate()
return_code = process.returncode
if PY3 and not binary and result is not None:
# pylint: disable=no-member
                # Decode from the locale using the surrogate escape error
# handler (decoding cannot fail)
(stdout, stderr) = result
stdout = os.fsdecode(stdout)
stderr = os.fsdecode(stderr)
else:
stdout, stderr = result
if not ignore_exit_code and return_code not in check_exit_code:
raise subprocess.CalledProcessError(returncode=return_code,
cmd=command,
output=(stdout, stderr))
else:
return (stdout, stderr)
except subprocess.CalledProcessError:
if attempts:
time.sleep(retry_interval)
else:
raise
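# Illustrative sketch, not part of the original module (command and paths are
# hypothetical): execute() retries a failing command up to ``attempts`` times,
# waiting ``retry_interval`` seconds between tries, and returns (stdout,
# stderr); exit codes are ignored unless check_exit_code is True, an int, or
# a list of allowed codes:
#
#     stdout, stderr = execute(['git', 'rev-parse', 'HEAD'],
#                              cwd='/path/to/repo',
#                              check_exit_code=[0],
#                              attempts=2,
#                              retry_interval=0.5)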
| {
"repo_name": "rmcgibbo/conda-build",
"path": "conda_build/utils.py",
"copies": "2",
"size": "7618",
"license": "bsd-3-clause",
"hash": 8918207048973063000,
"line_mean": 31.0084033613,
"line_max": 99,
"alpha_frac": 0.545287477,
"autogenerated": false,
"ratio": 3.9146968139773897,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0006285214314558776,
"num_lines": 238
} |